int MYWAVE::poll()
{
    AGS::Engine::MutexLock _lock(_mutex);

    if (!done && _destroyThis)
    {
        internal_destroy();
        _destroyThis = false;
    }

    if (wave == NULL)
    {
        return 1;
    }
    if (paused)
    {
        return 0;
    }

    if (firstTime)
    {
        // need to wait until here so that we have been assigned a channel
        //sample_update_callback(wave, voice);
        firstTime = 0;
    }

    if (voice_get_position(voice) < 0)
    {
        done = 1;
        if (psp_audio_multithreaded)
            internal_destroy();
    }

    return done;
}
// clean up finished samples
void sfx_cleanup()
{
    for (int i = 0; i < WAV_COUNT; i++)
        if (sfx_voice[i] != -1 && voice_get_position(sfx_voice[i]) < 0)
        {
            deallocate_voice(sfx_voice[i]);
            sfx_voice[i] = -1;
        }
}
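The cleanup above relies on voice_get_position() returning a negative value once a non-looping voice has played to the end. A minimal sketch of that pattern, assuming sound is already installed and 'snd' is a hypothetical, already-loaded SAMPLE:

// Sketch only: fire a one-shot sample and poll until Allegro reports it done.
// 'snd' is a hypothetical SAMPLE; error handling is kept minimal.
int play_and_wait(SAMPLE *snd)
{
    int v = allocate_voice(snd);
    if (v < 0)
        return -1;                         // no free voice available

    voice_set_playmode(v, PLAYMODE_PLAY);  // one-shot, not looping
    voice_start(v);

    // voice_get_position() returns the offset in samples while playing,
    // and a negative value once the voice has finished.
    while (voice_get_position(v) >= 0)
        rest(1);

    deallocate_voice(v);
    return 0;
}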
int MYWAVE::get_pos_ms()
{
    // convert the offset in samples into the offset in ms
    //return ((1000000 / voice_get_frequency(voice)) * voice_get_position(voice)) / 1000;

    if (voice_get_frequency(voice) < 100)
        return 0;
    // (number of samples / (samples per second / 100)) * 10 = ms
    return (voice_get_position(voice) / (voice_get_frequency(voice) / 100)) * 10;
}
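To make the integer arithmetic above concrete: at a playback frequency of 44100 Hz, a position of 22050 samples gives (22050 / 441) * 10 = 500 ms. A small sketch of the same conversion on plain integers, with hypothetical values standing in for the live voice queries:

// Sketch only: the samples-to-milliseconds conversion used above.
// 'freq' and 'pos' stand in for voice_get_frequency()/voice_get_position().
static int samples_to_ms(int freq, int pos)
{
    if (freq < 100)
        return 0;                  // guard against division by zero below
    // e.g. freq = 44100, pos = 22050  ->  (22050 / 441) * 10 = 500 ms
    return (pos / (freq / 100)) * 10;
}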
void resume_mod(void)
{
    int index;

    mi.forbid = TRUE;
    mi.pause = FALSE;
    for (index = 0; index < (mi.max_chn); index++) {
        if (voice_get_position(voice_table[index]) >= 0)
            voice_start(voice_table[index]);
    }
    mi.forbid = FALSE;
}
// plays an sfx sample
void sfx(int index, int pan, bool loop, bool restart)
{
    if (!sfx_init(index))
        return;

    voice_set_playmode(sfx_voice[index], loop ? PLAYMODE_LOOP : PLAYMODE_PLAY);
    voice_set_pan(sfx_voice[index], pan);

    int pos = voice_get_position(sfx_voice[index]);
    if (restart)
        voice_set_position(sfx_voice[index], 0);
    if (pos <= 0)
        voice_start(sfx_voice[index]);
}
void JamulSoundUpdate(void)
{
    int i;

    for (i = 0; i < MAX_SOUNDS_AT_ONCE; i++)
    {
        if (playBuffer[i].voice != -1 && (playBuffer[i].flags & SND_PLAYING))
        {
            if (voice_get_position(playBuffer[i].voice) == -1)
            {
                playBuffer[i].flags &= (~SND_PLAYING);
            }
        }
    }
}
int MYOGG::get_pos_ms()
{
    // Unfortunately the alogg_get_pos_msecs_oggstream function
    // returns the ms offset that was last decoded, so it's always
    // ahead of the actual playback. Therefore we have this
    // hideous hack below to sort it out.
    if ((done) || (!alogg_is_playing_oggstream(stream)))
        return 0;

    AUDIOSTREAM *str = alogg_get_audiostream_oggstream(stream);
    long offs = (voice_get_position(str->voice) * 1000) / str->samp->freq;

    if (last_ms_offs != alogg_get_pos_msecs_oggstream(stream)) {
        last_but_one_but_one = last_but_one;
        last_but_one = last_ms_offs;
        last_ms_offs = alogg_get_pos_msecs_oggstream(stream);
    }

    // just about to switch buffers
    if (offs < 0)
        return last_but_one;

    int end_of_stream = alogg_is_end_of_oggstream(stream);

    if ((str->active == 1) && (last_but_one_but_one > 0) && (str->locked == NULL)) {
        switch (end_of_stream) {
        case 0:
        case 2:
            offs -= (last_but_one - last_but_one_but_one);
            break;
        case 1:
            offs -= (last_but_one - last_but_one_but_one);
            break;
        }
    }

    /* char tbuffer[260];
       sprintf(tbuffer,"offs: %d  last_but_one_but_one: %d  last_but_one: %d  active:%d  locked: %p  EOS: %d",
               offs, last_but_one_but_one, last_but_one, str->active, str->locked, end_of_stream);
       write_log(tbuffer); */

    if (end_of_stream == 1) {
        return offs + last_but_one;
    }

    return offs + last_but_one_but_one;
}
int poll()
{
    if (wave == NULL)
        return 1;
    if (paused)
        return 0;

    if (firstTime) {
        // need to wait until here so that we have been assigned a channel
        //sample_update_callback(wave, voice);
        firstTime = 0;
    }

    if (voice_get_position(voice) < 0)
        done = 1;

    return done;
}
static int allegro_bufferspace(void)
{
    int ret, pos;

    /* voice_get_position returns current position in samples. */
    pos = voice_get_position(voice) * sizeof(SWORD);
    ret = buffer_offset - pos;
    if (ret < 0)
        ret += buffer_len;
    ret /= sizeof(SWORD);
    if (ret > (int)written_samples)
        ret = written_samples;
    return buffer_len / sizeof(SWORD) - ret;
}
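The calculation above treats the sample as a ring buffer: the bytes still queued are the distance from the playback position up to the write offset, wrapping around at buffer_len, and the free space is what is left of the buffer after that. A standalone sketch of the same arithmetic with hypothetical values (it omits the written_samples clamp used above):

/* Sketch only: with a 16384-byte buffer, a write offset of 1000 bytes and a
 * play position of 3000 bytes, the queued data is 1000 - 3000 + 16384 =
 * 14384 bytes (7192 samples), leaving 8192 - 7192 = 1000 samples free.
 */
static int bufferspace_demo(int buffer_len, int buffer_offset, int play_pos_bytes)
{
    int queued = buffer_offset - play_pos_bytes;  /* bytes queued but not yet played */
    if (queued < 0)
        queued += buffer_len;                     /* wrap around the ring buffer */
    return buffer_len / 2 - queued / 2;           /* free space, in 16-bit samples */
}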
// start it (in loop mode) if it's not already playing,
// otherwise adjust it to play in loop mode -DD
void cont_sfx(int index)
{
    if (!sfx_init(index)) {
        return;
    }

    if (voice_get_position(sfx_voice[index]) <= 0) {
        voice_set_position(sfx_voice[index], 0);
        voice_set_playmode(sfx_voice[index], PLAYMODE_LOOP);
        voice_start(sfx_voice[index]);
    } else {
        adjust_sfx(index, 128, true);
    }
}
int MYSTATICOGG::get_pos_ms()
{
    // Unfortunately the alogg_get_pos_msecs function
    // returns the ms offset that was last decoded, so it's always
    // ahead of the actual playback. Therefore we have this
    // hideous hack below to sort it out.
    if ((done) || (!alogg_is_playing_ogg(tune)))
        return 0;

    AUDIOSTREAM *str = alogg_get_audiostream_ogg(tune);
    long offs = (voice_get_position(str->voice) * 1000) / str->samp->freq;

    if (last_ms_offs != alogg_get_pos_msecs_ogg(tune)) {
        last_but_one_but_one = last_but_one;
        last_but_one = last_ms_offs;
        last_ms_offs = alogg_get_pos_msecs_ogg(tune);
    }

    // just about to switch buffers
    if (offs < 0)
        return last_but_one;

    int end_of_stream = alogg_is_end_of_ogg(tune);

    if ((str->active == 1) && (last_but_one_but_one > 0) && (str->locked == NULL)) {
        switch (end_of_stream) {
        case 0:
        case 2:
            offs -= (last_but_one - last_but_one_but_one);
            break;
        case 1:
            offs -= (last_but_one - last_but_one_but_one);
            break;
        }
    }

    if (end_of_stream == 1) {
        return offs + last_but_one + extraOffset;
    }

    return offs + last_but_one_but_one + extraOffset;
}
bool JamulSoundPlay(int voice, long pan, long vol, byte playFlags)
{
    // if this copy is in use, can't play it
    if (voice_get_position(voice) > 0)
    {
        if (playFlags & SOUND_CUTOFF)
        {
            voice_set_position(voice, 0);
            // keep going to handle the rest of the stuff
        }
        else
            return FALSE;  // can't play if it's playing
    }

    // set the pan and volume and start the voice
    voice_set_volume(voice, vol);
    voice_set_pan(voice, pan);
    voice_start(voice);
    return TRUE;
}
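A usage sketch of the flag handling above, assuming a hypothetical voice handle obtained earlier with allocate_voice(): SOUND_CUTOFF rewinds a busy copy and restarts it, while omitting it makes the call report failure instead.

// Sketch only: how SOUND_CUTOFF changes behaviour when the voice is busy.
// 'explosion_voice' is a hypothetical handle from allocate_voice().
void demo_cutoff(int explosion_voice)
{
    // Centered pan (128), full volume (255).  If the voice is already
    // playing, SOUND_CUTOFF rewinds it to sample 0 and restarts it.
    JamulSoundPlay(explosion_voice, 128, 255, SOUND_CUTOFF);

    // Without SOUND_CUTOFF the call returns FALSE while the earlier
    // playback is still in progress (voice_get_position() > 0).
    if (!JamulSoundPlay(explosion_voice, 128, 255, 0))
    {
        // the sound was left alone; try another copy or drop the request
    }
}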
void cSoundPlayer::think()
{
    if (maximumVoices < 0)
        return;

    for (int i = 0; i < maximumVoices; i++) {
        int pos;
        int voice = voices[i];

        // if it contains a voice get its position
        if (voice != -1) {
            pos = voice_get_position(voice);
        } else {
            pos = -2;
        }

        // if it is at the end then release it
        if (pos == -1) {
            destroySound(voice, true);
        }
    }
}
static void updateaudiostream(void)
{
    extern int throttle;
    INT16 *data = stream_cache_data;
    int stereo = stream_cache_stereo;
    int len = stream_cache_len;
    int buflen;
    int start, end;

    if (!stream_playing)
        return; /* error */

    buflen = audio_buffer_length;
    start = voice_pos;
    end = voice_pos + len;
    if (end > buflen)
        end -= buflen;

#ifdef USE_SEAL
    if (throttle) /* sync with audio only when speed throttling is not turned off */
    {
        profiler_mark(PROFILER_IDLE);
        for (;;)
        {
            LONG curpos;

            AGetVoicePosition(hVoice[0], &curpos);
            if (start < end)
            {
                if (curpos < start || curpos >= end)
                    break;
            }
            else
            {
                if (curpos < start && curpos >= end)
                    break;
            }
            AUpdateAudioEx(Machine->sample_rate / Machine->drv->frames_per_second);
        }
        profiler_mark(PROFILER_END);
    }

    if (stereo)
    {
        INT16 *bufL, *bufR;
        int p;

        bufL = (INT16 *)lpWave[0]->lpData;
        bufR = (INT16 *)lpWave[1]->lpData;
        p = start;
        while (p != end)
        {
            if (p >= buflen)
                p -= buflen;
            bufL[p] = *data++;
            bufR[p] = *data++;
            p++;
        }
    }
    else
    {
        INT16 *buf;
        int p;

        buf = (INT16 *)lpWave[0]->lpData;
        p = start;
        while (p != end)
        {
            if (p >= buflen)
                p -= buflen;
            buf[p] = *data++;
            p++;
        }
    }

    if (start < end)
    {
        AWriteAudioData(lpWave[0], 2 * start, 2 * len);
        if (stereo)
            AWriteAudioData(lpWave[1], 2 * start, 2 * len);
    }
    else
    {
        int remain = buflen - start;

        AWriteAudioData(lpWave[0], 2 * start, 2 * remain);
        AWriteAudioData(lpWave[0], 0, 2 * (len - remain));
        if (stereo)
        {
            AWriteAudioData(lpWave[1], 2 * start, 2 * remain);
            AWriteAudioData(lpWave[1], 0, 2 * (len - remain));
        }
    }
#endif

#ifdef USE_ALLEGRO
    {
        if (throttle) /* sync with audio only when speed throttling is not turned off */
        {
            profiler_mark(PROFILER_IDLE);
            for (;;)
            {
                int curpos;

                curpos = voice_get_position(myvoice);
                if (start < end)
                {
                    if (curpos < start || curpos >= end)
                        break;
                }
                else
                {
                    if (curpos < start && curpos >= end)
                        break;
                }
            }
            profiler_mark(PROFILER_END);
        }

        if (stereo)
        {
            INT16 *buf = mysample->data;
            int p = start;

            while (p != end)
            {
                if (p >= buflen)
                    p -= buflen;
                buf[2 * p]     = (*data++ * master_volume / 256) ^ 0x8000;
                buf[2 * p + 1] = (*data++ * master_volume / 256) ^ 0x8000;
                p++;
            }
        }
        else
        {
            INT16 *buf = mysample->data;
            int p = start;

            while (p != end)
            {
                if (p >= buflen)
                    p -= buflen;
                buf[p] = (*data++ * master_volume / 256) ^ 0x8000;
                p++;
            }
        }
    }
#endif

    voice_pos = end;
    if (voice_pos == buflen)
        voice_pos = 0;
}
int _apeg_audio_flush(APEG_LAYER *layer)
{
    unsigned char *buf = layer->audio.pcm.samples;
    unsigned char *data;
    int hs;
    int ret = APEG_OK;

    if(layer->audio.pcm.point < layer->audio.bufsize)
    {
        int count = layer->audio.pcm.point / 2;
        int samplesend = layer->audio.bufsize / 2;

        while(count < samplesend)
            ((short*)buf)[count++] = 0x8000;

        if(layer->audio.pcm.point == 0)
            ret = APEG_EOF;
    }

    if(layer->audio.callback)
    {
        if(ret != APEG_OK)
            return ret;

        ret = layer->audio.callback((APEG_STREAM*)layer, buf,
                                    layer->audio.pcm.point,
                                    layer->audio.callback_arg);
        if(ret < 0)
            return APEG_ERROR;

        if(ret > 0)
        {
            layer->audio.pos += ret / 2 / layer->stream.audio.channels;

            layer->audio.pcm.point -= ret;
            if(layer->audio.pcm.point > 0)
                memmove(buf, buf+ret, layer->audio.pcm.point);

            layer->stream.audio.flushed = TRUE;

            if(!(layer->stream.flags&APEG_HAS_VIDEO))
                layer->stream.pos = (double)layer->audio.pos /
                                    (double)layer->stream.audio.freq;
        }

        return APEG_OK;
    }

    /* We need to test the stream buffer to see if it's ready for more audio
     * yet. */
    hs = layer->audio.stream->len/2;
    if((layer->audio.buf_segment && voice_get_position(layer->audio.voice) >= hs) ||
       (!layer->audio.buf_segment && voice_get_position(layer->audio.voice) < hs))
        return ret;

    voice_stop(layer->audio.voice);

    data = layer->audio.stream->data;
    data += layer->audio.buf_segment * hs * layer->stream.audio.channels * 2;

    /* Commit the buffer to the stream and update the time */
    memcpy(data, buf, layer->audio.bufsize);

    voice_start(layer->audio.voice);

    layer->audio.buf_segment ^= 1;
    layer->audio.pos += ((layer->audio.pcm.point >= layer->audio.bufsize) ?
                         layer->audio.samples_per_update :
                         (layer->audio.pcm.point/2/layer->stream.audio.channels));

    if(!(layer->stream.flags&APEG_HAS_VIDEO))
        layer->stream.pos = (double)layer->audio.pos /
                            (double)layer->stream.audio.freq;

    /* Remove the old data and put the unused samples at the beginning */
    layer->audio.pcm.point -= layer->audio.bufsize;
    if(layer->audio.pcm.point > 0)
        memmove(buf, buf+layer->audio.bufsize, layer->audio.pcm.point);
    else if(layer->audio.pcm.point < 0)
        layer->audio.pcm.point = 0;

    layer->stream.audio.flushed = TRUE;
    return ret;
}
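The buf_segment test above implements manual double buffering on a single looping voice: the sample is split into two halves, and a half is only refilled once voice_get_position() shows playback has moved into the other half. A much-reduced sketch of that scheme (it drops the voice_stop/voice_start bracketing and the position bookkeeping done above); 'spl', 'voice', 'chunk_len' and 'fill' are all hypothetical:

/* Sketch only: refill whichever half of a looping voice is not being played.
 * 'spl' is a 16-bit mono SAMPLE of 2*chunk_len samples already attached to
 * 'voice' with PLAYMODE_LOOP; 'fill' produces the next chunk of audio.
 */
static void stream_poll(SAMPLE *spl, int voice, int chunk_len,
                        void (*fill)(unsigned short *dst, int len))
{
    static int segment = 0;                 /* half we will write next */
    int pos = voice_get_position(voice);

    /* Only refill a half while playback is inside the other half. */
    if ((segment == 0 && pos < chunk_len) || (segment == 1 && pos >= chunk_len))
        return;

    fill((unsigned short *)spl->data + segment * chunk_len, chunk_len);
    segment ^= 1;
}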
static int allegro_write(SWORD *pbuf, size_t nr)
{
    static int counter;
    unsigned int i, count;
    /*unsigned int write_size;*/

    counter++;

    /* XXX: Assumes `nr' is a multiple of `fragment_size'.  This is always the
       case with the current implementation.  */
    count = nr / (fragment_size / sizeof(SWORD));

    /* Write one fragment at a time.  FIXME: This could be faster.  */
    for (i = 0; i < count; i++, pbuf += fragment_size / sizeof(SWORD)) {
        if (!been_suspended) {
            unsigned int write_end;

            /* XXX: We do not use modulo here because we assume we always
               write full fragments.  */
            write_end = buffer_offset + fragment_size - 1;

            /* Block if we are at the position the soundcard is playing.
               Notice that we also assume that the part of the buffer we are
               going to lock is small enough to fit in the safe space.  */
            while (1) {
                unsigned int pos = sizeof(SWORD) * voice_get_position(voice);
                unsigned int pos2 = pos + fragment_size;

                if (pos2 < buffer_len) {
                    if (buffer_offset >= pos2 || write_end < pos)
                        break;
                } else {
                    pos2 -= buffer_len;
                    if (write_end < pos && buffer_offset >= pos2)
                        break;
                }
            }
        }

        /* Write fragment.  */
        {
            unsigned int j;
            WORD *p = (WORD *)(buffer->data + buffer_offset);

            /* XXX: Maybe the SID engine could already produce samples in
               unsigned format as we need them here?  */
            for (j = 0; j < fragment_size / sizeof(SWORD); j++)
                p[j] = pbuf[j] + 0x8000;
        }

        buffer_offset += fragment_size;
        if (buffer_offset >= buffer_len)
            buffer_offset = 0;

        if (been_suspended) {
            been_suspended = 0;
            voice_set_position(voice, 0);
            voice_start(voice);
        }
    }

    written_samples += nr;
    if (written_samples > buffer_len)
        written_samples = buffer_len;

    return 0;
}
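The busy-wait above keeps the fragment about to be written, [buffer_offset, buffer_offset + fragment_size - 1], clear of the window the card is about to play, [pos, pos + fragment_size). For example, with a 16384-byte buffer and 1024-byte fragments, a play position of 4096 makes the unsafe window [4096, 5120): writing at offset 5120 is fine, writing at 4608 is not. A standalone sketch of that overlap test, with all sizes hypothetical and given in bytes:

/* Sketch only: does a fragment starting at write_off collide with the
 * "unsafe" window just ahead of the playback position?
 */
static int write_would_collide(unsigned int write_off, unsigned int play_pos,
                               unsigned int fragment_size, unsigned int buffer_len)
{
    unsigned int write_end = write_off + fragment_size - 1;
    unsigned int unsafe_end = play_pos + fragment_size;    /* exclusive */

    if (unsafe_end < buffer_len) {
        /* Window does not wrap: safe if we start after it or end before it. */
        return !(write_off >= unsafe_end || write_end < play_pos);
    } else {
        /* Window wraps past the end of the ring buffer. */
        unsafe_end -= buffer_len;
        return !(write_end < play_pos && write_off >= unsafe_end);
    }
}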
int get_pos()
{
    return voice_get_position(voice);
}
int MYWAVE::get_pos()
{
    return voice_get_position(voice);
}
void saPlayBufferedStreamedSampleBase(int channel, signed char *data, int len,
                                      int freq, int volume, int bits, int pan)
{
    /* This version works at low level, creating a sample, and following
       its advancement directly in the voice position... */
    int i;
    short *dout;
    short *dfin;
    signed short *din;

    //fprintf(stderr,"saPlayBuffer %d freq %d bits %d pan %d len %d\n",channel,freq,bits,pan,len);
    if (audio_sample_rate == 0 || channel >= NUMVOICES)
        return;
    if (SndMachine == NULL)
        return;

    if (!playing[channel]) {
#ifdef USE_COMPENS
        int fin = stream_buffer_max * freq * 2 / fps;
#else
        int fin = stream_buffer_max * len;
#endif
        if (lpWave[channel]) {
            destroy_sample(lpWave[channel]);
            lpWave[channel] = 0;
        }

        if (!(lpWave[channel] = create_sample(16, 0, freq, fin / 2))) {
            lpWave[channel] = 0;
            return;
        }

        // memset( lpWave[channel]->data, 0, fin );
        dout = lpWave[channel]->data;
        dfin = (short *)(((char *)lpWave[channel]->data) + fin);
        // Fill the buffer with 0 (signed) in case the soundcard reads what
        // is after the sample...
        while (dout < dfin)
            *(dout++) = 0x8000;

        vend[channel] = dfin;
        counter[channel] = 0;

        hVoice[channel] = allocate_voice(lpWave[channel]);
        if (hVoice[channel] < 0) {
            allegro_message("allocate_voice failed !\n");
            exit(1);
        }
        voice_set_playmode(hVoice[channel], PLAYMODE_LOOP);
        playing[channel] = 1;   /* use front surface */

        init_mixing_buff(len);

        /**** make sound temp. buffer ****/
        if (enh_stereo && SamplePan[channel] == PAN_LEFT)
            dout = (short *)(((char *)lpWave[channel]->data) + len * (MODEB_UPDATE_COUNT + 1));
        else
            dout = (short *)(((char *)lpWave[channel]->data) + len * MODEB_UPDATE_COUNT);
        din = ((signed short *)data);
        // memcpy( dout, din, len );
        for (i = 0; i < len; i += 2) {
            *(dout++) = *(din++) ^ 0x8000;
        }
        update_recording(channel, data);
        if (dout == dfin) {
            dout = (short *)(((char *)lpWave[channel]->data));
        }
#ifdef DUMP_CHANNELS
        fwrite(lpWave[channel]->data + len * MODEB_UPDATE_COUNT, 1, len, stream_out[channel]);
#endif

        vout[channel] = dout;
        saSetVolume(channel, SampleVol[channel]);
        saSetPan(channel, SamplePan[channel]);
        voice_set_position(hVoice[channel], 0);
        voice_start(hVoice[channel]);
        pos_counter[channel] = 0;
    } else {
        int pos = voice_get_position(hVoice[channel]);
        int th_pos;
        int count = (enh_stereo && SamplePan[channel] == PAN_LEFT ?
                     MODEB_UPDATE_COUNT + 1 : MODEB_UPDATE_COUNT);

        // The difference between the theoretical position and the actual
        // position exists because the reported position depends directly on
        // when the sound driver last updated the voice. Luckily the
        // difference is large only when the voice starts; after that the
        // position is updated more or less when it should be, depending on
        // external factors such as CPU load.
        dout = vout[channel];
        th_pos = (dout - ((INT16 *)lpWave[channel]->data) - count * len / 2);
        if (th_pos < 0)
            th_pos += stream_buffer_max * len / 2;

        // if there is more than count frames between pos and th_pos, then
        // wait for the voice.
        if (pos < th_pos) {
            if (th_pos - pos < count * len / 2) {
                more_stream = -1;  // drop next frame
            }
        }

        din = ((signed short *)data);
        dfin = vend[channel];
        // memcpy(dout,din,len);
        for (i = 0; i < len; i += 2) {
            *(dout++) = *(din++) ^ 0x8000;
        }
        update_recording(channel, data);
        if (dout >= dfin) {
            dout = (short *)(((char *)lpWave[channel]->data));
        }
#ifdef DUMP_CHANNELS
        fwrite(lpWave[channel]->data + len * s_pos, 1, len, stream_out[channel]);
#endif
        vout[channel] = dout;

        // more than count frames of advance : more stream !
        pos -= count * len / 2;
        if (pos > th_pos && pos > 0) {
            // send more
            more_stream = 1;
        }
    }
}