static inline void sample_backend(int left, int right) { #if AUDIO_DEBUG int nr; for (nr = 0; nr < 4; nr++) { struct audio_channel_data *cdp = audio_channel + nr; if (cdp->state != 0 && cdp->datpt != 0 && (dmacon & (1 << nr)) && cdp->datpt >= cdp->datptend) { fprintf(stderr, "Audio output overrun on channel %d: %.8x/%.8x\n", nr, cdp->datpt, cdp->datptend); } } #endif /* samples are in range -16384 (-128*64*2) and 16256 (127*64*2) */ left <<= 16 - 14 - 1; right <<= 16 - 14 - 1; /* [-32768, 32512] */ if (sound_use_filter) { left = filter(left, &sound_filter_state[0]); right = filter(right, &sound_filter_state[1]); } *(sndbufpt++) = left; *(sndbufpt++) = right; check_sound_buffers(); }
/* Anti-aliasing mono handler.  samplexx_anti_handler() samples the
 * points where Paula switches its output voltage and averages them;
 * here we only have to sum the four channel results into a single
 * mono word and emit it. */
static void sample16i_anti_handler (void)
{
  int chan[4];
  int mono;

  samplexx_anti_handler (chan);
  mono = chan[0] + chan[1] + chan[2] + chan[3];
  put_sound_word_mono_func (mono);
  check_sound_buffers ();
}
/* Stereo sinc (BLEP) handler: fetch the four band-limited channel
 * values, mix channels 0+3 into one output word and 1+2 into the
 * other, scale both to output range and emit a stereo frame.
 *
 * NOTE(review): a second function with this exact name appears later
 * in this file; unless preprocessor guards (not visible here) compile
 * one of them out, this translation unit cannot build -- confirm. */
static void sample16si_sinc_handler (void)
{
  int chans[4];
  int sum_l, sum_r;

  samplexx_sinc_handler (chans);
  sum_l = chans[0] + chans[3];
  sum_r = chans[1] + chans[2];
  sum_l = FINISH_DATA (sum_l);
  sum_r = FINISH_DATA (sum_r);
  put_sound_word_stereo_func (sum_l, sum_r);
  check_sound_buffers ();
}
/* Plain 16-bit stereo handler (no interpolation).  Channels 0 and 3
 * feed one output word, channels 1 and 2 the other; a channel whose
 * adk_mask is zero contributes nothing. */
void sample16s_handler (void)
{
  uae_u32 left = 0;
  uae_u32 right = 0;

  if (audio_channel[0].adk_mask)
    left += audio_channel[0].current_sample * audio_channel[0].vol;
  if (audio_channel[3].adk_mask)
    left += audio_channel[3].current_sample * audio_channel[3].vol;

  if (audio_channel[1].adk_mask)
    right += audio_channel[1].current_sample * audio_channel[1].vol;
  if (audio_channel[2].adk_mask)
    right += audio_channel[2].current_sample * audio_channel[2].vol;

  left = FINISH_DATA (left);
  right = FINISH_DATA (right);
  put_sound_word_stereo_func (left, right);
  check_sound_buffers ();
}
/* Stereo sample handler with linear interpolation ("rh" variant).
 * Each channel's output is blended between its previous and current
 * sample according to how far into the channel's current Paula period
 * we are, then channels 0+3 and 1+2 are mixed into the two output
 * words. */
static void sample16si_rh_handler (void)
{
  unsigned long delta, ratio;

  /* Current and previous (last) raw sample of each Paula channel. */
  uae_u32 data0 = audio_channel[0].current_sample;
  uae_u32 data1 = audio_channel[1].current_sample;
  uae_u32 data2 = audio_channel[2].current_sample;
  uae_u32 data3 = audio_channel[3].current_sample;
  uae_u32 data0p = audio_channel[0].last_sample;
  uae_u32 data1p = audio_channel[1].last_sample;
  uae_u32 data2p = audio_channel[2].last_sample;
  uae_u32 data3p = audio_channel[3].last_sample;

  /* DO_CHANNEL_1 presumably applies the channel's volume scaling to
   * the named variable in place -- defined elsewhere, confirm. */
  DO_CHANNEL_1 (data0, 0);
  DO_CHANNEL_1 (data1, 1);
  DO_CHANNEL_1 (data2, 2);
  DO_CHANNEL_1 (data3, 3);
  DO_CHANNEL_1 (data0p, 0);
  DO_CHANNEL_1 (data1p, 1);
  DO_CHANNEL_1 (data2p, 2);
  DO_CHANNEL_1 (data3p, 3);

  /* Mask out muted channels (adk_mask looks like an all-ones/all-zeros
   * mute mask, matching its use in the other handlers). */
  data0 &= audio_channel[0].adk_mask;
  data0p &= audio_channel[0].adk_mask;
  data1 &= audio_channel[1].adk_mask;
  data1p &= audio_channel[1].adk_mask;
  data2 &= audio_channel[2].adk_mask;
  data2p &= audio_channel[2].adk_mask;
  data3 &= audio_channel[3].adk_mask;
  data3p &= audio_channel[3].adk_mask;

  /* linear interpolation and summing up... */
  /* ratio = position inside the current sample period in 1/256ths;
   * blend previous and current sample accordingly. */
  delta = audio_channel[0].per;
  ratio = ((audio_channel[0].evtime % delta) << 8) / delta;
  data0 = (data0 * (256 - ratio) + data0p * ratio) >> 8;

  delta = audio_channel[1].per;
  ratio = ((audio_channel[1].evtime % delta) << 8) / delta;
  data1 = (data1 * (256 - ratio) + data1p * ratio) >> 8;

  /* Channel 2 is summed into the same output word as channel 1,
   * channel 3 into the same word as channel 0. */
  delta = audio_channel[2].per;
  ratio = ((audio_channel[2].evtime % delta) << 8) / delta;
  data1 += (data2 * (256 - ratio) + data2p * ratio) >> 8;

  delta = audio_channel[3].per;
  ratio = ((audio_channel[3].evtime % delta) << 8) / delta;
  data0 += (data3 * (256 - ratio) + data3p * ratio) >> 8;

  /* Scale the two mixed words to output range and emit the frame. */
  data0 = FINISH_DATA (data0);
  data1 = FINISH_DATA (data1);
  put_sound_word_stereo_func(data0, data1);
  check_sound_buffers ();
}
/* Plain 16-bit mono handler: sum the volume-scaled current sample of
 * every channel whose adk_mask is active and emit one mono word.
 * NOTE(review): unlike the stereo handlers this applies no FINISH_DATA
 * scaling; presumably put_sound_word_mono_func handles it -- confirm. */
void sample16_handler (void)
{
  uae_u32 acc = 0;
  int ch;

  for (ch = 0; ch < 4; ch++) {
    if (audio_channel[ch].adk_mask)
      acc += audio_channel[ch].current_sample * audio_channel[ch].vol;
  }

  put_sound_word_mono_func (acc);
  check_sound_buffers ();
}
/* this interpolator performs BLEP mixing (bleps are shaped like integrated sinc
 * functions) with a type of BLEP that matches the filtering configuration.
 *
 * NOTE(review): a function with this exact name is also defined earlier
 * in this file; unless preprocessor guards (not visible in this chunk)
 * compile one of them out, the translation unit will not build -- confirm. */
static void sample16si_sinc_handler (void)
{
  int i, n;
  int const *winsinc;
  int datas[4];

  /* Select the BLEP table matching the current filter model; the
   * power-LED state selects the "LED filter on" variant, and n == 4
   * is the unfiltered table. */
  if (sound_use_filter) {
    n = (sound_use_filter == FILTER_MODEL_A500) ? 0 : 2;
    if (gui_ledstate)
      n += 1;
  } else {
    n = 4;
  }
  winsinc = winsinc_integral[n];

  for (i = 0; i < 4; i += 1) {
    int j;
    struct audio_channel_data *acd = &audio_channel[i];
    /* The sum rings with harmonic components up to infinity... */
    /* NOTE(review): if output_state can be negative, this left shift
     * is undefined behavior in C -- confirm output_state's range. */
    int sum = acd->output_state << 17;
    /* ...but we cancel them through mixing in BLEPs instead */
    /* Walk the ring buffer of recent output transitions, newest
     * first, subtracting each transition's BLEP contribution until
     * the entries are too old to matter. */
    int offsetpos = acd->sinc_queue_head & (SINC_QUEUE_LENGTH - 1);
    for (j = 0; j < SINC_QUEUE_LENGTH; j += 1) {
      int age = acd->sinc_queue_time - acd->sinc_queue[offsetpos].time;
      if (age >= SINC_QUEUE_MAX_AGE)
        break;
      sum -= winsinc[age] * acd->sinc_queue[offsetpos].output;
      offsetpos = (offsetpos + 1) & (SINC_QUEUE_LENGTH - 1);
    }
    datas[i] = sum >> 16;
  }

  /* Mix channels 0+3 and 1+2 into the two output words, clamp, and
   * write the stereo frame directly into the sound buffer. */
  *(sndbufpt++) = clamp_sample(datas[0] + datas[3]);
  *(sndbufpt++) = clamp_sample(datas[1] + datas[2]);
  check_sound_buffers();
}
static void sample16si_crux_handler (void) { uae_u32 data0 = audio_channel[0].current_sample; uae_u32 data1 = audio_channel[1].current_sample; uae_u32 data2 = audio_channel[2].current_sample; uae_u32 data3 = audio_channel[3].current_sample; uae_u32 data0p = audio_channel[0].last_sample; uae_u32 data1p = audio_channel[1].last_sample; uae_u32 data2p = audio_channel[2].last_sample; uae_u32 data3p = audio_channel[3].last_sample; DO_CHANNEL_1 (data0, 0); DO_CHANNEL_1 (data1, 1); DO_CHANNEL_1 (data2, 2); DO_CHANNEL_1 (data3, 3); DO_CHANNEL_1 (data0p, 0); DO_CHANNEL_1 (data1p, 1); DO_CHANNEL_1 (data2p, 2); DO_CHANNEL_1 (data3p, 3); data0 &= audio_channel[0].adk_mask; data0p &= audio_channel[0].adk_mask; data1 &= audio_channel[1].adk_mask; data1p &= audio_channel[1].adk_mask; data2 &= audio_channel[2].adk_mask; data2p &= audio_channel[2].adk_mask; data3 &= audio_channel[3].adk_mask; data3p &= audio_channel[3].adk_mask; { struct audio_channel_data *cdp; unsigned long ratio, ratio1; #define INTERVAL (scaled_sample_evtime * 3) cdp = audio_channel + 0; ratio1 = cdp->per - cdp->evtime; ratio = (ratio1 << 12) / INTERVAL; if (cdp->evtime < scaled_sample_evtime || ratio1 >= INTERVAL) ratio = 4096; data0 = (data0 * ratio + data0p * (4096 - ratio)) >> 12; cdp = audio_channel + 1; ratio1 = cdp->per - cdp->evtime; ratio = (ratio1 << 12) / INTERVAL; if (cdp->evtime < scaled_sample_evtime || ratio1 >= INTERVAL) ratio = 4096; data1 = (data1 * ratio + data1p * (4096 - ratio)) >> 12; cdp = audio_channel + 2; ratio1 = cdp->per - cdp->evtime; ratio = (ratio1 << 12) / INTERVAL; if (cdp->evtime < scaled_sample_evtime || ratio1 >= INTERVAL) ratio = 4096; data2 = (data2 * ratio + data2p * (4096 - ratio)) >> 12; cdp = audio_channel + 3; ratio1 = cdp->per - cdp->evtime; ratio = (ratio1 << 12) / INTERVAL; if (cdp->evtime < scaled_sample_evtime || ratio1 >= INTERVAL) ratio = 4096; data3 = (data3 * ratio + data3p * (4096 - ratio)) >> 12; } data1 += data2; data0 += data3; data0 = FINISH_DATA (data0); 
data1 = FINISH_DATA (data1); put_sound_word_stereo_func(data0, data1); check_sound_buffers (); }