Code Example #1
static int audio_read_header(AVFormatContext *s1)
{
    AudioData *s = s1->priv_data;
    AVStream *st;
    int ret;

    st = avformat_new_stream(s1, NULL);
    if (!st) {
        return AVERROR(ENOMEM);
    }

    ret = audio_open(s1, 0, s1->filename);
    if (ret < 0) {
        return AVERROR(EIO);
    }

    /* take real parameters */
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id = s->codec_id;
    st->codec->sample_rate = s->sample_rate;
    st->codec->channels = s->channels;

    avpriv_set_pts_info(st, 64, 1, 1000000);  /* 64 bits pts in us */
    return 0;
}
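
Note: a read_header callback such as the one above is not called directly; it is normally referenced from a demuxer descriptor. The following is only a minimal sketch of that wiring under the usual FFmpeg AVInputFormat pattern; the "oss" name and the audio_read_packet/audio_read_close helpers are assumptions for illustration, not taken from the example itself.

/* sketch only -- assumes audio_read_packet()/audio_read_close() exist in the same file */
static AVInputFormat oss_demuxer_sketch = {
    .name           = "oss",
    .priv_data_size = sizeof(AudioData),   /* makes s1->priv_data point at an AudioData */
    .read_header    = audio_read_header,   /* the function shown above */
    .read_packet    = audio_read_packet,   /* assumed companion callback */
    .read_close     = audio_read_close,    /* assumed companion callback */
    .flags          = AVFMT_NOFILE,        /* reads from a device, not from a file */
};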
Code Example #2
int flowm_audio_stream_chunk(const cst_wave *w, int start, int size, 
                             int last, cst_audio_streaming_info *asi)
{

    if (fl_ad == NULL)
    {
        fl_ad = audio_open(w->sample_rate,w->num_channels,CST_AUDIO_LINEAR16);
    }

    if (flowm_play_status == FLOWM_PLAY)
    {
        audio_write(fl_ad,&w->samples[start],size*sizeof(short));
        return CST_AUDIO_STREAM_CONT;
    }
    else if (flowm_play_status == FLOWM_BENCH)
    {   /* Do TTS but don't actually play it */
        /* How much have we played */
        flowm_duration += (size*1.0)/w->sample_rate;
        return CST_AUDIO_STREAM_CONT;
    }
    else
    {   /* for STOP, and the SKIPS (if they get here) */
        return CST_AUDIO_STREAM_STOP;
    }
}
Code Example #3
static int audio_read_header(AVFormatContext *s1, AVFormatParameters *ap)
{
    AudioData *s = s1->priv_data;
    AVStream *st;
    int ret;

#if FF_API_FORMAT_PARAMETERS
    if (ap->sample_rate > 0)
        s->sample_rate = ap->sample_rate;
    if (ap->channels > 0)
        s->channels = ap->channels;
#endif

    st = av_new_stream(s1, 0);
    if (!st) {
        return AVERROR(ENOMEM);
    }

    ret = audio_open(s1, 0, s1->filename);
    if (ret < 0) {
        return AVERROR(EIO);
    }

    /* take real parameters */
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id = s->codec_id;
    st->codec->sample_rate = s->sample_rate;
    st->codec->channels = s->channels;

    av_set_pts_info(st, 64, 1, 1000000);  /* 64 bits pts in us */
    return 0;
}
Code Example #4
File: freebsdplay.c Project: AhmadTux/freebsd-ports
int
audio_init(int argc, char *argv[])
{
 int rate_set = 0;
 int use_audio = 1;

 prog = argv[0];

 argc = getargs("freebsd Audio",argc, argv,
                "r", "%d", &rate_set,    "Sample rate",
                "a", NULL, &use_audio,   "Audio enable",
                NULL);

 if (help_only)
  return argc;

 if (use_audio)
  audio_open();

 if (rate_set)
  samp_rate = rate_set;

 if (dev_fd > 0)
  {
   ioctl(dev_fd, SNDCTL_DSP_SPEED, &samp_rate);
   printf("Actual sound rate: %ld\n", samp_rate);
  }

 return argc;
}
Code Example #5
File: audio.c Project: braincat/uwin
int dsp_ioctl(int fd, int arg, int *val)
{
	int ret=0;
	Pdev_t *pdev=  dev_ptr(getfdp(P_CP,fd)->devno);
	WAVEFORMATEX *wp = (WAVEFORMATEX*)(pdev+1);
	if(*val<0)
		ret = -1;
	else if(arg==SNDCTL_DSP_SPEED)
	{
		wp->nSamplesPerSec=*val;
		wp->nAvgBytesPerSec= (*val) * wp->nBlockAlign;
	}
	else if(arg==SNDCTL_DSP_CHANNELS)
	{
		wp->nChannels=*val;
		wp->nBlockAlign=(wp->wBitsPerSample * (*val))/8 ;
		wp->nAvgBytesPerSec=wp->nSamplesPerSec*wp->nBlockAlign;
	}
	else
		ret = -1;
	if(ret==0 && !audio_open(pdev,1))
	{
		logerr(LOG_DEV+5, "waveOutOpen");
		ret = -1;
	}
	return(ret);
}
Code Example #6
File: main.c Project: awersatos/AD
void main(void)
{
    int i, j;
    audio = audio_open(AUDIO_1);
    leds  = led_open(LEDS);
    led_set_all_on_intensity(leds, 0);
    led_turn_all_on(leds);

    while (1)
    {
        // get audio to left and right buffers for processing.
        get_audio(stereo_buf, AUDIO_BUF_SIZE);
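        // De-interleave the stereo buffer: even indices go to the right buffer, odd to the left.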
        for (i = 0, j = 0; i < AUDIO_BUF_SIZE ; i++, j++)
        {
          audio_buf_r[j] = stereo_buf[i++];
          audio_buf_l[j] = stereo_buf[i];
        }

        // Loop left and right channels through.
        put_audio(stereo_buf, AUDIO_BUF_SIZE);

        // Get the D.C. average value of the wavelet.
        intensity_l = abs_ave(audio_buf_l, AUDIO_BUF_SIZE/2);
        intensity_r = abs_ave(audio_buf_r, AUDIO_BUF_SIZE/2);

        // Set VU meter on LEDS
        update_intensity(intensity_l, LEFT);
        update_intensity(intensity_r, RIGHT);
    }
}
Code Example #7
File: audio.c Project: Zolta/avbin
static int audio_read_header(AVFormatContext *s1, AVFormatParameters *ap)
{
    AudioData *s = s1->priv_data;
    AVStream *st;
    int ret;

    if (ap->sample_rate <= 0 || ap->channels <= 0)
        return -1;

    st = av_new_stream(s1, 0);
    if (!st) {
        return AVERROR(ENOMEM);
    }
    s->sample_rate = ap->sample_rate;
    s->channels = ap->channels;

    ret = audio_open(s, 0, s1->filename);
    if (ret < 0) {
        av_free(st);
        return AVERROR(EIO);
    }

    /* take real parameters */
    st->codec->codec_type = CODEC_TYPE_AUDIO;
    st->codec->codec_id = s->codec_id;
    st->codec->sample_rate = s->sample_rate;
    st->codec->channels = s->channels;

    av_set_pts_info(st, 64, 1, 1000000);  /* 64 bits pts in us */
    return 0;
}
Code Example #8
File: msu1.cpp Project: quequotion/higan-qq
void MSU1::mmio_write(unsigned addr, uint8 data) {
  cpu.synchronize_coprocessors();
  addr = 0x2000 | (addr & 7);

  switch(addr) {
  case 0x2000: mmio.data_seek_offset = (mmio.data_seek_offset & 0xffffff00) | (data <<  0); break;
  case 0x2001: mmio.data_seek_offset = (mmio.data_seek_offset & 0xffff00ff) | (data <<  8); break;
  case 0x2002: mmio.data_seek_offset = (mmio.data_seek_offset & 0xff00ffff) | (data << 16); break;
  case 0x2003: mmio.data_seek_offset = (mmio.data_seek_offset & 0x00ffffff) | (data << 24);
    mmio.data_read_offset = mmio.data_seek_offset;
    data_open();
    break;
  case 0x2004: mmio.audio_track = (mmio.audio_track & 0xff00) | (data << 0); break;
  case 0x2005: mmio.audio_track = (mmio.audio_track & 0x00ff) | (data << 8);
    mmio.audio_play_offset = 8;
    audio_open();
    break;
  case 0x2006: mmio.audio_volume = data; break;
  case 0x2007:
    if(mmio.audio_busy) break;
    if(mmio.audio_error) break;
    mmio.audio_repeat = data & 2;
    mmio.audio_play   = data & 1;
    break;
  }
}
Code Example #9
File: audio.c Project: braincat/uwin
static HANDLE open_dsp(Devtab_t* dp, Pfd_t* fdp, Path_t *ip, int oflags, HANDLE *extra)
{
	HANDLE hp;
	int blkno, minor = ip->name[1];
	Pdev_t *pdev;
	unsigned short *blocks = devtab_ptr(Share->chardev_index, AUDIO_MAJOR);

	if(load_audio())
	{

		/* If the device is already opened */
		if(blkno = blocks[minor])
		{
			logerr(LOG_DEV+5, "Device Busy");
			errno = EBUSY;
			return 0;
		}
		else
		{
			WAVEFORMATEX *wp;
			if((blkno = block_alloc(BLK_PDEV)) == 0)
				return(0);
			pdev = dev_ptr(blkno);
			wp = (WAVEFORMATEX*)(pdev+1);
			ZeroMemory((void *)pdev, BLOCK_SIZE-1);
			/* Initialising the wave format structure */
			wp->wFormatTag=WAVE_FORMAT_PCM;
			wp->nChannels=CHANNELS;
			wp->nSamplesPerSec=SAMPLES_PER_SEC;
			if(minor&1)
				wp->nSamplesPerSec *= 2;
			wp->wBitsPerSample=BITS_PER_SAMPLE;
			wp->nBlockAlign=(wp->wBitsPerSample*CHANNELS)/8 ;
			wp->nAvgBytesPerSec=wp->nSamplesPerSec*wp->nBlockAlign;
			wp->cbSize=EXTRA_FORMAT_SIZE;
			if(!audio_open(pdev,1))
			{
				logerr(LOG_DEV+5, "waveOutOpen");
				block_free((unsigned short)blkno);
				return 0;
			}
			hp = AUDIO_HANDLE;
			pdev->major=AUDIO_MAJOR;
			pdev->minor = minor;
			uwin_pathmap(ip->path, pdev->devname, sizeof(pdev->devname), UWIN_W2U);

			fdp->devno = blkno;
			blocks[minor] = blkno;
			pdev->devpid = P_CP->pid;
		}
		return hp;
	}
	else
	{
		logerr(0, "audio functions not supported");
		return 0;
	}
}
Code Example #10
File: openbsdplay.c Project: Bluerise/bitrig-ports
int
audio_init(int argc, char *argv[])
{
 int rate_set = 0;
 int use_audio = 1;

 prog = argv[0];

 argc = getargs("OpenBSD Audio",argc, argv,
                "r", "%d", &rate_set,    "Sample rate",
                "a", NULL, &use_audio,   "Audio enable",
                NULL);

 if (help_only)
  return argc;

 if (rate_set)
  samp_rate = rate_set;

 if (!use_audio)
  return argc;

 audio_open();

 sio_initpar(&par);
 par.bits = 16;
 par.sig = 1;
 par.rate = samp_rate;
 par.pchan = 1;

 if (!sio_setpar(hdl, &par) || !sio_getpar(hdl, &par))
  {
   fprintf(stderr, "error setting sndio parameters\n");
   hdl = NULL;
  }
 else
  {
   if (par.bits != 16 || par.sig != 1 || par.pchan != 1 || par.rate != samp_rate)
   {
    fprintf(stderr, "returned incorrect sndio parameters\n");
    hdl = NULL;
   }
  }

 if (hdl && !sio_start(hdl))
  {
   fprintf(stderr, "error starting sndio\n");
   hdl = NULL;
  }

 return argc;
}
Code Example #11
File: audio.c Project: BackupTheBerlios/bonephone
static int
audio_device_attempt_config(session_t *sp, audio_config *config)
{
        audio_format *inf, *ouf;
        const codec_format_t *incf;
        int success;

        incf = codec_get_format(config->primary);
        assert(incf);
        
        inf = audio_format_dup(&incf->format);
        ouf = audio_format_dup(&incf->format);

        if (inf->channels != 2 && config->render_3d) {
                /* If 3d rendering is enabled we need stereo output
                 * format. 
                 */
                ouf->channels = 2;
        }

        success = audio_open(config->device, inf, ouf);
        if (success) {
                mixer_info_t  mi;
                uint16_t unit_len;
                assert(sp->ms           == NULL);
                assert(sp->tb           == NULL);
                assert(sp->cushion      == NULL);

                audio_non_block(config->device);

                /* Initialize read and write components */
                sp->meter_period = inf->sample_rate / 15;
                unit_len         = inf->bytes_per_block * 8 / (inf->bits_per_sample*inf->channels); 
                tx_create(&sp->tb, sp, (uint16_t)inf->sample_rate, (uint16_t)inf->channels, (uint16_t)unit_len);
                cushion_create(&sp->cushion, (uint16_t)inf->sample_rate);
                sp->cur_ts = ts_convert(inf->sample_rate, sp->cur_ts);
                mi.sample_rate   = ouf->sample_rate;
                mi.channels      = ouf->channels;
                mi.buffer_length = 32640;
                mix_create(&sp->ms, &mi, sp->cur_ts);

                if (zero_buf == NULL) {
                        zero_buf = (sample*)xmalloc(unit_len * sizeof(sample));
                        audio_zero(zero_buf, unit_len, DEV_S16);
                }
        }

        audio_format_free(&inf);
        audio_format_free(&ouf);

        return success;
}
Code Example #12
static int sound_open(struct inode *inode, struct file *file)
{
	int dev = MINOR(inode->i_rdev);
	int retval;

	DEB(printk("sound_open(dev=%d)\n", dev));
	if ((dev >= SND_NDEVS) || (dev < 0)) {
		printk(KERN_ERR "Invalid minor device %d\n", dev);
		return -ENXIO;
	}
	switch (dev & 0x0f) {
	case SND_DEV_CTL:
		dev >>= 4;
		if (dev >= 0 && dev < MAX_MIXER_DEV && mixer_devs[dev] == NULL) {
			char modname[20];
			sprintf(modname, "mixer%d", dev);
			request_module(modname);
		}
		if (dev && (dev >= num_mixers || mixer_devs[dev] == NULL))
			return -ENXIO;

		if (mixer_devs[dev]->owner)
			__MOD_INC_USE_COUNT (mixer_devs[dev]->owner);
		break;

	case SND_DEV_SEQ:
	case SND_DEV_SEQ2:
		if ((retval = sequencer_open(dev, file)) < 0)
			return retval;
		break;

	case SND_DEV_MIDIN:
		if ((retval = MIDIbuf_open(dev, file)) < 0)
			return retval;
		break;

	case SND_DEV_DSP:
	case SND_DEV_DSP16:
	case SND_DEV_AUDIO:
		if ((retval = audio_open(dev, file)) < 0)
			return retval;
		break;

	default:
		printk(KERN_ERR "Invalid minor device %d\n", dev);
		return -ENXIO;
	}

	return 0;
}
Code Example #13
File: soundcard.c Project: ANFS/ANFS-kernel
static int sound_open(struct inode *inode, struct file *file)
{
	int dev = iminor(inode);
	int retval;

	DEB(printk("sound_open(dev=%d)\n", dev));
	if ((dev >= SND_NDEVS) || (dev < 0)) {
		printk(KERN_ERR "Invalid minor device %d\n", dev);
		return -ENXIO;
	}
	mutex_lock(&soundcard_mutex);
	switch (dev & 0x0f) {
	case SND_DEV_CTL:
		dev >>= 4;
		if (dev >= 0 && dev < MAX_MIXER_DEV && mixer_devs[dev] == NULL) {
			request_module("mixer%d", dev);
		}
		retval = -ENXIO;
		if (dev && (dev >= num_mixers || mixer_devs[dev] == NULL))
			break;
	
		if (!try_module_get(mixer_devs[dev]->owner))
			break;

		retval = 0;
		break;

	case SND_DEV_SEQ:
	case SND_DEV_SEQ2:
		retval = sequencer_open(dev, file);
		break;

	case SND_DEV_MIDIN:
		retval = MIDIbuf_open(dev, file);
		break;

	case SND_DEV_DSP:
	case SND_DEV_DSP16:
	case SND_DEV_AUDIO:
		retval = audio_open(dev, file);
		break;

	default:
		printk(KERN_ERR "Invalid minor device %d\n", dev);
		retval = -ENXIO;
	}

	mutex_unlock(&soundcard_mutex);
	return retval;
}
Code Example #14
void set_sound_user_active(UByte user)
{
    hw_TurnOnDSP();
    audio_open();
    hw_ActivateHS();
    hw_InternalSpeaker(TRUE);

    hw_RequestLsrMicGain();

    audio_stop_inband_tone();
    audio_tx_unmute();
    audio_rx_unmute();
    setMic(2);
    setVolume(3);
}
Code Example #15
File: audio.c Project: braincat/uwin
static ssize_t dsp_write(int fd, Pfd_t* fdp, char *buff, size_t asize)
{
	DWORD size;
	HANDLE hp;
	WAVEHDR whdr;
	Pdev_t *pdev = dev_ptr(fdp->devno);

	if(!(hp=Phandle(fd)))
	{
		errno = EBADF;
		return(-1);
	}
	if(!(hp=audio_open(pdev,0)))
	{
		errno = EBADF;
		return(-1);
	}
	if(asize > SSIZE_MAX)
		asize = SSIZE_MAX;
	size = (DWORD)asize;

	/*  Preparing the Wave Header */
	ZeroMemory((void*)&whdr,sizeof(whdr));
	whdr.lpData=buff;
	whdr.dwBufferLength=size;
	whdr.dwFlags=0;
	if((*pwaveOutPrepareHeader)(hp,&whdr,sizeof(WAVEHDR)) != MMSYSERR_NOERROR)
	{
		logerr(LOG_DEV+5, "waveOutPrepareHeader");
		return(-1);
	}
	else
	{
		/* Writing to the audio port */
		whdr.dwLoops=PLAY_NUMBER_OF_TIMES;
		whdr.dwFlags|=(WHDR_BEGINLOOP|WHDR_ENDLOOP);
		if((*pwaveOutWrite)(hp,&whdr,sizeof(WAVEHDR)) != MMSYSERR_NOERROR)
		{
			logerr(LOG_DEV+5, "waveOutWrite");
			return(-1);
		}
		else
		{
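			/* busy-wait until all WHDR flags are set (15 is presumably
			   WHDR_DONE|WHDR_PREPARED|WHDR_BEGINLOOP|WHDR_ENDLOOP),
			   i.e. the driver has finished with this buffer */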
			while((whdr.dwFlags != 15) && (size !=0));
			return(size);
		}
	}
}
Code Example #16
/* sound output support */
static int audio_write_header(AVFormatContext *s1)
{
    AudioData *s = s1->priv_data;
    AVStream *st;
    int ret;

    st = s1->streams[0];
    s->sample_rate = st->codec->sample_rate;
    s->channels = st->codec->channels;
    ret = audio_open(s1, 1, s1->filename);
    if (ret < 0) {
        return AVERROR(EIO);
    } else {
        return 0;
    }
}
Code Example #17
File: audio.c Project: retsyx/slapstick
void audio_capabilities(struct audio_info_struct *ai)
{
	int fmts;
	int i,j,k,k1=NUM_RATES-1;
	struct audio_info_struct ai1 = *ai;

        if (param.outmode != DECODE_AUDIO) {
		memset(capabilities,1,sizeof(capabilities));
		return;
	}

	memset(capabilities,0,sizeof(capabilities));
	if(param.force_rate) {
		rates[NUM_RATES-1] = param.force_rate;
		k1 = NUM_RATES;
	}

	/* if audio_open fails, the device is just not capable of anything... */
	if(audio_open(&ai1) < 0) {
		perror("audio");
	}
	else
	{
		for(i=0;i<NUM_CHANNELS;i++) {
			for(j=0;j<NUM_RATES;j++) {
				ai1.channels = channels[i];
				ai1.rate = rates[j];
				fmts = audio_get_formats(&ai1);
				if(fmts < 0)
					continue;
				for(k=0;k<NUM_ENCODINGS;k++) {
					if((fmts & encodings[k]) == encodings[k])
						capabilities[i][k][j] = 1;
				}
			}
		}
		audio_close(&ai1);
	}

	if(param.verbose > 1) print_capabilities(ai);
}
Code Example #18
File: serialization.cpp Project: zeromus/dasShiny
void MSU1::serialize(serializer &s) {
  Thread::serialize(s);

  s.integer(boot);

  s.integer(mmio.data_offset);
  s.integer(mmio.audio_offset);
  s.integer(mmio.audio_loop_offset);

  s.integer(mmio.audio_track);
  s.integer(mmio.audio_volume);

  s.integer(mmio.data_busy);
  s.integer(mmio.audio_busy);
  s.integer(mmio.audio_repeat);
  s.integer(mmio.audio_play);
  s.integer(mmio.audio_error);

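  //reopen the data and audio streams so their file handles match the restored MMIO state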
  data_open();
  audio_open();
}
Code Example #19
File: msu1.cpp Project: quequotion/higan-qq
void MSU1::reset() {
  create(MSU1::Enter, 44100);

  mmio.data_seek_offset = 0;
  mmio.data_read_offset = 0;

  mmio.audio_play_offset = 0;
  mmio.audio_loop_offset = 0;

  mmio.audio_track = 0;
  mmio.audio_volume = 0;

  mmio.data_busy = false;
  mmio.audio_busy = false;
  mmio.audio_repeat = false;
  mmio.audio_play = false;
  mmio.audio_error = false;

  data_open();
  audio_open();
}
Code Example #20
File: msu1.cpp Project: PGGB/Higan-Core
void MSU1::mmio_write(unsigned addr, uint8 data) {
  cpu.synchronize_coprocessors();
  addr = 0x2000 | (addr & 7);

  switch(addr) {
  case 0x2000: mmio.data_offset = (mmio.data_offset & 0xffffff00) | (data <<  0); break;
  case 0x2001: mmio.data_offset = (mmio.data_offset & 0xffff00ff) | (data <<  8); break;
  case 0x2002: mmio.data_offset = (mmio.data_offset & 0xff00ffff) | (data << 16); break;
  case 0x2003: mmio.data_offset = (mmio.data_offset & 0x00ffffff) | (data << 24);
    if(datafile.open()) datafile.seek(mmio.data_offset);
    mmio.data_busy = false;
    break;
  case 0x2004: mmio.audio_track = (mmio.audio_track & 0xff00) | (data << 0); break;
  case 0x2005: mmio.audio_track = (mmio.audio_track & 0x00ff) | (data << 8);
    mmio.audio_offset = 0;
    audio_open();
    if(audiofile.open()) {
      uint32 header = audiofile.readm(4);
      if(header != 0x4d535531) {  //verify 'MSU1' header
        audiofile.close();
      } else {
        mmio.audio_loop_offset = 8 + audiofile.readl(4) * 4;
        mmio.audio_offset = 8;
      }
    }
    mmio.audio_busy   = false;
    mmio.audio_repeat = false;
    mmio.audio_play   = false;
    mmio.audio_error  = !audiofile.open();
    break;
  case 0x2006:
    mmio.audio_volume = data;
    break;
  case 0x2007:
    mmio.audio_repeat = data & 2;
    mmio.audio_play   = data & 1;
    break;
  }
}
Code Example #21
File: main.c Project: awersatos/AD
void main(void)
{
    int i, j;
    audio = audio_open(AUDIO_1);
    leds  = led_open(LEDS);
    while (1)
    {
        // get audio to left and right buffers for processing.
        get_audio(stereo_buf, AUDIO_BUF_SIZE);
        for (i = 0, j = 0; i < AUDIO_BUF_SIZE ; i++, j++)
        {
          audio_buf_r[j] = stereo_buf[i++];
          audio_buf_l[j] = stereo_buf[i];
        }

        // Loop left and right channels through.
        put_audio(stereo_buf, AUDIO_BUF_SIZE);

        // Put the D.C. average value of the wavelet on LEDs
        update_intensity(abs_ave(audio_buf_l, AUDIO_BUF_SIZE/2),LEFT);
        update_intensity(abs_ave(audio_buf_r, AUDIO_BUF_SIZE/2),RIGHT);
    }
}
Code Example #22
int main(int argc, char *argv[])
{
    int result = ACTION_NONE;
    int leave = 0;

    /* i18n */
#ifdef ENABLE_NLS
    setlocale (LC_ALL, "");
    bindtextdomain (PACKAGE, LOCALEDIR);
    textdomain (PACKAGE);
#endif
    
    /* ltris info */
    printf( "LTris %s\nCopyright 2002-2005 Michael Speck\nPublished under GNU GPL\n---\n", VERSION );
    printf( "Looking up data in: %s\n", SRC_DIR );
#ifndef SOUND
    printf( "Compiled without sound and music\n" );
#endif

    set_random_seed(); /* set random seed */

    /* game ids - not translated, so they stay fixed independent of language */
    strcpy(gametype_ids[0],"demo");
    strcpy(gametype_ids[1],"classic");
    strcpy(gametype_ids[2],"figures");
    strcpy(gametype_ids[3],"vshuman");
    strcpy(gametype_ids[4],"vscpu");
    strcpy(gametype_ids[5],"vshumanhuman");
    strcpy(gametype_ids[6],"vshumancpu");
    strcpy(gametype_ids[7],"vscpucpu");
    /* game names - translated for display */
    strcpy(gametype_names[0],_("Demo"));
    strcpy(gametype_names[1],_("Classic"));
    strcpy(gametype_names[2],_("Figures"));
    strcpy(gametype_names[3],_("Vs Human"));
    strcpy(gametype_names[4],_("Vs CPU"));
    strcpy(gametype_names[5],_("Vs Human&Human"));
    strcpy(gametype_names[6],_("Vs Human&CPU"));
    strcpy(gametype_names[7],_("Vs CPU&CPU"));
    config_load();

    init_sdl( SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER );
    set_video_mode( std_video_mode( config.fullscreen ) );
    SDL_WM_SetCaption( "LTris", 0 );
    sdl.fade = config.fade;
    SDL_SetEventFilter( event_filter );
#ifdef SOUND
    audio_open();
    sound_enable( config.sound );
    sound_volume( config.volume * 16 );
#endif

    /* create */
    hint_load_res();
    manager_create();    
    tetris_create();
    chart_load();
    /* run game */
    manager_fade( FADE_IN );
    while( !leave && !term_game ) {
        result = manager_run();
        switch( result ) {
            case ACTION_QUIT: leave = 1; break;
            case ACTION_MAKE_STAT:
                manager_fade( FADE_OUT );
                tetris_make_stat();
                manager_fade( FADE_IN );
                break;
            case ACTION_PLAY:
                manager_fade( FADE_OUT );
                if ( tetris_init() ) {
                    tetris_run();
                    tetris_clear();
                }
                manager_fade( FADE_IN );
                break;
            default: break;
        }
    }
    manager_fade( FADE_OUT );
    /* delete stuff */
    tetris_delete();
    manager_delete();
    chart_save();
    chart_delete();
    hint_delete_res();
    
#ifdef SOUND
    audio_close();
#endif
    config_save();

    return EXIT_SUCCESS;
}
Code Example #23
File: player.c Project: Manishearth/moc
/* Play a file (disk file) using the given decoder. next_file is precached. */
static void play_file (const char *file, const struct decoder *f,
		const char *next_file, struct out_buf *out_buf)
{
	void *decoder_data;
	struct sound_params sound_params = { 0, 0, 0 };
	float already_decoded_time;
	struct md5_data md5;

#if !defined(NDEBUG) && defined(DEBUG)
	md5.okay = true;
	md5.len = 0;
	md5_init_ctx (&md5.ctx);
#endif

	out_buf_reset (out_buf);

	precache_wait (&precache);

	if (precache.ok && strcmp(precache.file, file)) {
		logit ("The precached file is not the file we want.");
		precache.f->close (precache.decoder_data);
		precache_reset (&precache);
	}

	if (precache.ok && !strcmp(precache.file, file)) {
		struct decoder_error err;

		logit ("Using precached file");

		assert (f == precache.f);

		sound_params = precache.sound_params;
		decoder_data = precache.decoder_data;
		set_info_channels (sound_params.channels);
		set_info_rate (sound_params.rate / 1000);

		if (!audio_open(&sound_params)) {
			md5.okay = false;
			precache.f->close (precache.decoder_data);
			precache_reset (&precache);
			return;
		}

#if !defined(NDEBUG) && defined(DEBUG)
		md5.len += precache.buf_fill;
		md5_process_bytes (precache.buf, precache.buf_fill, &md5.ctx);
#endif

		audio_send_buf (precache.buf, precache.buf_fill);

		precache.f->get_error (precache.decoder_data, &err);
		if (err.type != ERROR_OK) {
			md5.okay = false;
			if (err.type != ERROR_STREAM
					|| options_get_int(
						"ShowStreamErrors"))
				error ("%s", err.err);
			decoder_error_clear (&err);
		}

		already_decoded_time = precache.decoded_time;

		if(f->get_avg_bitrate)
			set_info_avg_bitrate (f->get_avg_bitrate(decoder_data));
		else
			set_info_avg_bitrate (0);

		bitrate_list_init (&bitrate_list);
		bitrate_list.head = precache.bitrate_list.head;
		bitrate_list.tail = precache.bitrate_list.tail;

		/* don't free list elements when resetting precache */
		precache.bitrate_list.head = NULL;
		precache.bitrate_list.tail = NULL;
	}
	else {
		struct decoder_error err;

		status_msg ("Opening...");
		decoder_data = f->open(file);
		f->get_error (decoder_data, &err);
		if (err.type != ERROR_OK) {
			f->close (decoder_data);
			status_msg ("");
			error ("%s", err.err);
			decoder_error_clear (&err);
			logit ("Can't open file, exiting");
			return;
		}

		already_decoded_time = 0.0;
		if(f->get_avg_bitrate)
			set_info_avg_bitrate (f->get_avg_bitrate(decoder_data));
		bitrate_list_init (&bitrate_list);
	}

	audio_plist_set_time (file, f->get_duration(decoder_data));
	audio_state_started_playing ();
	precache_reset (&precache);

	decode_loop (f, decoder_data, next_file, out_buf, &sound_params,
			&md5, already_decoded_time);

#if !defined(NDEBUG) && defined(DEBUG)
	if (md5.okay) {
		uint8_t buf[MD5_DIGEST_SIZE];

		md5_finish_ctx (&md5.ctx, buf);
		log_md5_sum (file, sound_params, f, buf, md5.len);
	}
#endif
}
Code Example #24
File: audio_esd.c Project: gitpan/Audio-Play-MPG123
int audio_set_channels(struct audio_info_struct *ai)
{
  audio_close(ai);
  return audio_open(ai);
}
Code Example #25
File: audio_esd.c Project: gitpan/Audio-Play-MPG123
int audio_set_rate(struct audio_info_struct *ai)
{
  audio_close(ai);
  return audio_open(ai);
}
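
Note: in this ESD backend, changing a parameter is just a close-and-reopen; audio_set_channels (Example #24) and audio_set_format (Example #30) do exactly the same. A hedged usage sketch follows: the rate field and the negative return on failure are the same ones Example #17 relies on, the rest is illustrative only.

/* sketch only -- assumes ai has already been filled in and opened once */
struct audio_info_struct ai;
ai.rate = 44100;                /* store the new value in the struct first */
if (audio_set_rate(&ai) < 0)    /* the setter then reopens the device with it */
    fprintf(stderr, "could not reopen audio device\n");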
Code Example #26
File: sunplay.c Project: boukeversteegh/chise
void
play_sound_file (char *sound_file, int volume)
{
  int rrtn, wrtn;
  unsigned char buf [255];
  int file_fd;

  audio_fd = audio_open ();

  if (audio_fd < 0)
    {
      perror ("open /dev/audio");
      return;
    }

  /* where to find the proto for signal()... */
  sighup_handler = (SIGTYPE (*) (int)) signal (SIGHUP, sighandler);
  sigint_handler = (SIGTYPE (*) (int)) signal (SIGINT, sighandler);

  file_fd = open (sound_file, O_RDONLY, 0);
  if (file_fd < 0)
    {
      perror (sound_file);
      goto END_OF_PLAY;
    }

  if (init_device (volume, (unsigned char *) 0, file_fd, (unsigned int *) 0))
    goto END_OF_PLAY;

  while (1)
    {
      rrtn = read (file_fd, (char *) buf, sizeof (buf));
      if (rrtn < 0)
	{
	  perror ("read");
	  goto END_OF_PLAY;
	}
      if (rrtn == 0)
	break;

      while (1)
	{
	  wrtn = write (audio_fd, (char *) buf, rrtn);
	  if (wrtn < 0)
	    {
	      perror ("write");
	      goto END_OF_PLAY;
	    }
	  if (wrtn != 0)
	    break;

	  if (AUDIO_ERR_INTERRUPTED == audio_drain (audio_fd, 1))
	    goto END_OF_PLAY;
	}
      if (wrtn != rrtn)
	{
	  char warn_buf [255];
	  sprintf (warn_buf, "play: rrtn = %d, wrtn = %d", rrtn, wrtn);
	  warn (warn_buf);
	  goto END_OF_PLAY;
	}
    }

 END_OF_PLAY:

  if (file_fd > 0)
    close (file_fd);

  if (audio_fd > 0)
    {
      reset_device (1);
      close (audio_fd);
    }

  signal (SIGHUP, sighup_handler);
  signal (SIGINT, sigint_handler);
}
Code Example #27
File: player.c Project: Manishearth/moc
/* Decoder loop for a decoder that has already been opened and has probably been
 * running for some time. next_file will be precached at EOF. */
static void decode_loop (const struct decoder *f, void *decoder_data,
		const char *next_file, struct out_buf *out_buf,
		struct sound_params *sound_params, struct md5_data *md5,
		const float already_decoded_sec)
{
	bool eof = false;
	bool stopped = false;
	char buf[PCM_BUF_SIZE];
	int decoded = 0;
	struct sound_params new_sound_params;
	bool sound_params_change = false;
	float decode_time = already_decoded_sec; /* the position of the decoder
						    (in seconds) */

	out_buf_set_free_callback (out_buf, buf_free_callback);

	LOCK (curr_tags_mut);
	curr_tags = tags_new ();
	UNLOCK (curr_tags_mut);

	if (f->get_stream) {
		LOCK (decoder_stream_mut);
		decoder_stream = f->get_stream (decoder_data);
		UNLOCK (decoder_stream_mut);
	}
	else
		logit ("No get_stream() function");

	status_msg ("Playing...");

	while (1) {
		debug ("loop...");

		LOCK (request_cond_mutex);
		if (!eof && !decoded) {
			struct decoder_error err;

			UNLOCK (request_cond_mutex);

			if (decoder_stream && out_buf_get_fill(out_buf)
					< PREBUFFER_THRESHOLD) {
				prebuffering = 1;
				io_prebuffer (decoder_stream,
						options_get_int("Prebuffering")
						* 1024);
				prebuffering = 0;
				status_msg ("Playing...");
			}

			decoded = f->decode (decoder_data, buf, sizeof(buf),
					&new_sound_params);

			if (decoded)
				decode_time += decoded / (float)(sfmt_Bps(
							new_sound_params.fmt) *
						new_sound_params.rate *
						new_sound_params.channels);

			f->get_error (decoder_data, &err);
			if (err.type != ERROR_OK) {
				md5->okay = false;
				if (err.type != ERROR_STREAM
						|| options_get_int(
							"ShowStreamErrors"))
					error ("%s", err.err);
				decoder_error_clear (&err);
			}

			if (!decoded) {
				eof = true;
				logit ("EOF from decoder");
			}
			else {
				debug ("decoded %d bytes", decoded);
				if (!sound_params_eq(new_sound_params, *sound_params))
					sound_params_change = true;

				bitrate_list_add (&bitrate_list, decode_time,
						f->get_bitrate(decoder_data));
				update_tags (f, decoder_data, decoder_stream);
			}
		}

		/* Wait, if there is no space in the buffer to put the decoded
		 * data or EOF occurred and there is something in the buffer. */
		else if (decoded > out_buf_get_free(out_buf)
					|| (eof && out_buf_get_fill(out_buf))) {
			debug ("waiting...");
			if (eof && !precache.file && next_file
					&& file_type(next_file) == F_SOUND
					&& options_get_int("Precache")
					&& options_get_bool("AutoNext"))
				start_precache (&precache, next_file);
			pthread_cond_wait (&request_cond, &request_cond_mutex);
			UNLOCK (request_cond_mutex);
		}
		else
			UNLOCK (request_cond_mutex);

		/* When clearing the request, we must make sure that another
		 * request does not arrive at that moment, so we check whether
		 * the request has changed. */
		if (request == REQ_STOP) {
			logit ("stop");
			stopped = true;
			md5->okay = false;
			out_buf_stop (out_buf);

			LOCK (request_cond_mutex);
			if (request == REQ_STOP)
				request = REQ_NOTHING;
			UNLOCK (request_cond_mutex);

			break;
		}
		else if (request == REQ_SEEK) {
			int decoder_seek;

			logit ("seeking");
			md5->okay = false;
			req_seek = MAX(0, req_seek);
			if ((decoder_seek = f->seek(decoder_data, req_seek))
					== -1)
				logit ("error when seeking");
			else {
				out_buf_stop (out_buf);
				out_buf_reset (out_buf);
				out_buf_time_set (out_buf, decoder_seek);
				bitrate_list_empty (&bitrate_list);
				decode_time = decoder_seek;
				eof = false;
				decoded = 0;
			}

			LOCK (request_cond_mutex);
			if (request == REQ_SEEK)
				request = REQ_NOTHING;
			UNLOCK (request_cond_mutex);

		}
		else if (!eof && decoded <= out_buf_get_free(out_buf)
				&& !sound_params_change) {
			debug ("putting into the buffer %d bytes", decoded);
#if !defined(NDEBUG) && defined(DEBUG)
			if (md5->okay) {
				md5->len += decoded;
				md5_process_bytes (buf, decoded, &md5->ctx);
			}
#endif
			audio_send_buf (buf, decoded);
			decoded = 0;
		}
		else if (!eof && sound_params_change
				&& out_buf_get_fill(out_buf) == 0) {
			logit ("Sound parameters have changed.");
			*sound_params = new_sound_params;
			sound_params_change = false;
			set_info_channels (sound_params->channels);
			set_info_rate (sound_params->rate / 1000);
			out_buf_wait (out_buf);
			if (!audio_open(sound_params)) {
				md5->okay = false;
				break;
			}
		}
		else if (eof && out_buf_get_fill(out_buf) == 0) {
			logit ("played everything");
			break;
		}
	}

	status_msg ("");

	LOCK (decoder_stream_mut);
	decoder_stream = NULL;
	f->close (decoder_data);
	UNLOCK (decoder_stream_mut);

	bitrate_list_destroy (&bitrate_list);

	LOCK (curr_tags_mut);
	if (curr_tags) {
		tags_free (curr_tags);
		curr_tags = NULL;
	}
	UNLOCK (curr_tags_mut);

	out_buf_wait (out_buf);

	if (precache.ok && (stopped || !options_get_bool ("AutoNext"))) {
		precache_wait (&precache);
		precache.f->close (precache.decoder_data);
		precache_reset (&precache);
	}
}
Code Example #28
File: toc_win.c Project: daemqn/Atari_ST_Sources
void toc_extract( WINDOW * win )
{
  static const char * ext[] = { "avr", "raw", "raw", "wav" };
  char pathname[ 256 ];
  char prog_info[ 64 ];
  char buf[ 128 ];
  struct avr_header * avrh;
  struct wave_header * wavh;
  struct audio_entry entry;
  struct audio_stream * as;
  struct _toc_data * data;
  struct device_info * info;
  OBJECT * ck;
  int format, i, max, track_no;
  int fd, swap;
  long offset, length, position, end, progress, total_length;
  long max_buf_blocks, nblocks;
  void * buffer;

  if( !fileselect( preferences.toc_dest, "", "TXT_EXTDEST" ) )
    return;
  strrchr( preferences.toc_dest, '\\' )[1] = '\0';

  data = DataSearch( win, TW_MAGIC );
  max = data->n_tracks;
  format = fmt_popup.selected;
  total_length = 0;
  buffer = alloc_comm_buffer( BUFSIZE );
  if( !buffer )
    return;
  for( i = 0; i < max; i++ )
  {
    ck = data->tree + 1 + TF_CK + i * data->n_obj;
    if( ! (ck->ob_state & SELECTED) )
      continue;
    offset = toc_address( data->f[i].beg_time );
    length = toc_address( data->f[i].end_time ) + 1 - offset;
    if( length > 0 )
      total_length += length;
  }
  max_buf_blocks = BUFSIZE / 2352;

  progress = 0;
  progress_init( get_string( "TXT_EXTMSG" ), total_length );
  progress_activate_cancel( 1 );
  progress_init_timer();

  log_begin();
  log_printf( "*** Begin of a track extraction session\n\n" );
  as = NULL;
  for( i = 0; i < max; i++ )
  {
    ck = data->tree + 1 + TF_CK + i * data->n_obj;
    if( ! (ck->ob_state & SELECTED) )
      continue;
    offset = toc_address( data->f[i].beg_time );
    length = toc_address( data->f[i].end_time ) + 1 - offset;
    if( length <= 0 )
      continue;
    track_no = i + 1;
    position = get_track_offset( &data->toc, track_no, &end );
    if( toc_popup.selected == 0 )
      gen_daoimg_entry( &entry, toc_info.toc_file, track_no,
                        offset - position, end - offset - length );
    else
    {
      info = (struct device_info*)toc_popup.item[toc_popup.selected].info;
      gen_cd_entry( &entry, info, track_no, offset - position, end - offset - length );
    }
    if( as )
      as = audio_reopen( as, &entry );
    else
      as = audio_open( &entry );
    if( as == NULL )
      continue;

    sprintf( prog_info, get_string( "TXT_EXTTRK" ), track_no );
    progress_setinfo( prog_info );

    sprintf( pathname, "%strack%02d.%s", preferences.toc_dest, track_no, ext[ format ] );
    fd = open( pathname, O_WRONLY|O_CREAT|O_TRUNC );
    if( fd == -1 )
    {
      audio_close( as );
      alert_msg( "AL_FILERR", 1, pathname );
      goto error;
    }
    switch( format )
    {
    case 0:        /* AVR */
      avrh = (struct avr_header *) buf;
      avrh->avr_id = '2BIT';
      memset( avrh->name, 0, 8 );
      avrh->num_voices = 0xFFFF;
      avrh->num_bits = 16;
      avrh->signe = 0xffff;
      avrh->loop = 0;
      avrh->midi = 0xffff;
      avrh->freq_type.frequence = 0xff00ac44L;
      avrh->length = length * (2352 / 2);
      avrh->beg_loop = 0;
      avrh->end_loop = avrh->length;
      memset( avrh->reserved, 0, 26 + 64 );
      write( fd, avrh, sizeof( *avrh ) );
      swap = as->little_endian;
      break;
    case 1:        /* RAW big-endian */
      swap = as->little_endian;
      break;
    case 2:        /* RAW little-endian */
      swap = !as->little_endian;
      break;
    case 3:        /* WAVE */
      wavh = (struct wave_header *) buf;
      wavh->riff_id = 'RIFF';
      wavh->riff_len = swap_long( length * 2352 + 36 );
      wavh->wave_id = 'WAVE';
      wavh->fmt_id = 'fmt ';
      wavh->fmt_size = 0x10000000L;
      wavh->fmt_compression_code = 0x0100;
      wavh->fmt_channels = 0x0200;
      wavh->fmt_freq = 0x44ac0000L;
      wavh->fmt_bytes_sec = 0x10b10200L;
      wavh->fmt_block_align = 0x0400;
      wavh->fmt_num_bits = 0x1000;
      wavh->data_id = 'data';
      wavh->data_size = swap_long( length * 2352 );
      write( fd, wavh, sizeof( *wavh ) );
      swap = !as->little_endian;
      break;
    }
    while( length > 0 )
    {
      if( yield() )
      {
        audio_close( as );
        alert_msg( "AL_EXTINT", 1 );
        goto error;
      }
      nblocks = MIN( length, max_buf_blocks );
      if( audio_read( as, buffer, nblocks ) == 0 )
      {
        audio_close( as );
        goto error;
      }
      if( swap )
        swap_endian( buffer, nblocks * 2352 );
      if( write( fd, buffer, nblocks * 2352 ) == -1 )
      {
        close( fd );
        audio_close( as );
        alert_msg( "AL_FWRTERR", 1, pathname );
        goto error;
      }
      length -= nblocks;
      progress += nblocks;
      progress_setcount( progress );
    }
    close( fd );
  }
  audio_close( as );
error:
  log_printf( "*** End of the track extraction session\n\n" );
  log_end();
  progress_exit();
  free_comm_buffer( buffer );

}
Code Example #29
File: sunplay.c Project: boukeversteegh/chise
int
play_sound_data (unsigned char *data, int length, int volume)
{
  int wrtn, start = 0;
  unsigned int ilen;
  int result = 0;

  audio_fd = -1;

  if (length == 0) return 0;

  /* this is just to get a better error message */
  if (strncmp (".snd\0", (char *) data, 4))
    {
      warn ("Not valid audio data (bad magic number)");
      goto END_OF_PLAY;
    }
  if (length <= sizeof (Audio_hdr))
    {
      warn ("Not valid audio data (too short)");
      goto END_OF_PLAY;
    }

  audio_fd = audio_open ();
  if (audio_fd < 0)
      return 0;

  /* where to find the proto for signal()... */
  sighup_handler = (SIGTYPE (*) (int)) signal (SIGHUP, sighandler);
  sigint_handler = (SIGTYPE (*) (int)) signal (SIGINT, sighandler);

  if (init_device (volume, data, 0, &ilen))
    goto END_OF_PLAY;

  data   += (ilen<<2);
  length -= (ilen<<2);
  if (length <= 1)
    goto END_OF_PLAY;

  while (1)
    {
      wrtn = write (audio_fd, (char *) (data+start), length-start);
      if (wrtn < 0)
	{
	  perror ("write");
	  goto END_OF_PLAY;
	}
      if (wrtn != 0)
	{
	  start += wrtn;
	  break;
	}
      if (AUDIO_ERR_INTERRUPTED == audio_drain (audio_fd, 1))
	goto END_OF_PLAY;
    }
  if (wrtn != length)
    {
      char buf [255];
      sprintf (buf, "play: rrtn = %d, wrtn = %d", length, wrtn);
      warn (buf);
      goto END_OF_PLAY;
    }

 result = 1;
  
 END_OF_PLAY:

  if (audio_fd > 0)
    {
      reset_device (1);
      close (audio_fd);
    }

  signal (SIGHUP, sighup_handler);
  signal (SIGINT, sigint_handler);

  return result;
}
Code Example #30
File: audio_esd.c Project: gitpan/Audio-Play-MPG123
int audio_set_format(struct audio_info_struct *ai)
{
  audio_close(ai);
  return audio_open(ai);
}