int64_t DSoundBuf::GetPosition() const
{
	DWORD iCursor, iJunk;
	HRESULT hr = m_pBuffer->GetCurrentPosition( &iCursor, &iJunk );
	ASSERT_M( SUCCEEDED(hr), hr_ssprintf(hr, "GetCurrentPosition") );

	/* This happens occasionally on "Realtek AC97 Audio". */
	if( (int) iCursor == m_iBufferSize )
		iCursor = 0;
	ASSERT_M( (int) iCursor < m_iBufferSize, ssprintf("%i, %i", iCursor, m_iBufferSize) );

	int iCursorFrames = int(iCursor) / bytes_per_frame();
	int iWriteCursorFrames = m_iWriteCursor / bytes_per_frame();

	int iFramesBehind = iWriteCursorFrames - iCursorFrames;
	/* iFramesBehind will be 0 if we're called before the buffer starts playing:
	 * both iWriteCursorFrames and iCursorFrames will be 0. */
	if( iFramesBehind < 0 )
		iFramesBehind += buffersize_frames(); /* unwrap */

	int64_t iRet = m_iWriteCursorPos - iFramesBehind;

	/* Failsafe: never return a value smaller than we've already returned.
	 * This can happen once in a while in underrun conditions. */
	iRet = max( m_iLastPosition, iRet );
	m_iLastPosition = iRet;

	return iRet;
}
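GetPosition() relies on two frame-size helpers that are not shown in this excerpt. A minimal sketch of what they might look like, assuming they derive directly from the members set up in Init() (the project's real definitions may differ):

/* Hypothetical sketch of the helpers assumed above. */
int DSoundBuf::bytes_per_frame() const
{
	/* e.g. 2 channels * 16 bits / 8 = 4 bytes per frame */
	return m_iChannels * m_iSampleBits / 8;
}

int DSoundBuf::buffersize_frames() const
{
	return m_iBufferSize / bytes_per_frame();
}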
/* Check to make sure that, given the current writeahead and chunksize, we're
 * capable of filling the prefetch region entirely.  If we aren't, increase
 * the writeahead.  If this happens, we're underrunning. */
void DSoundBuf::CheckWriteahead( int iCursorStart, int iCursorEnd )
{
	/* If we're in a recovering-from-underrun state, stop. */
	if( m_iExtraWriteahead )
		return;

	/* If the driver is requesting an unreasonably large prefetch, ignore it entirely.
	 * Some drivers seem to give broken write cursors sporadically, requesting that
	 * almost the entire buffer be filled.  There's no reason a driver should ever need
	 * more than 8k frames of writeahead. */
	int iPrefetch = iCursorEnd - iCursorStart;
	wrap( iPrefetch, m_iBufferSize );

	if( iPrefetch >= 1024*32 )
	{
		static bool bLogged = false;
		if( bLogged )
			return;
		bLogged = true;

		LOG->Warn("Sound driver is requesting an overly large prefetch: wants %i (cursor at %i..%i), writeahead not adjusted",
			iPrefetch / bytes_per_frame(), iCursorStart, iCursorEnd );
		return;
	}

	if( m_iWriteAhead >= iPrefetch )
		return;

	/* We need to increase the writeahead. */
	LOG->Trace("insufficient writeahead: wants %i (cursor at %i..%i), writeahead adjusted from %i to %i",
		iPrefetch / bytes_per_frame(), iCursorStart, iCursorEnd, m_iWriteAhead, iPrefetch );

	m_iWriteAhead = iPrefetch;
}
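CheckWriteahead() also depends on a wrap() helper to bring a possibly negative cursor difference back into the range [0, m_iBufferSize). A minimal sketch under that assumption (the project's real helper may be templated or handle values far outside this range):

/* Hypothetical sketch: normalize a circular-buffer offset into [0, iSize). */
static inline void wrap( int &iVal, int iSize )
{
	iVal %= iSize;
	if( iVal < 0 )
		iVal += iSize;
}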
static void
cubeb_submit_buffer(cubeb_stream * stm, WAVEHDR * hdr)
{
  long got;
  MMRESULT r;

  got = stm->data_callback(stm, stm->user_ptr, hdr->lpData,
                           hdr->dwBufferLength / bytes_per_frame(stm->params));
  if (got < 0) {
    /* XXX handle this case */
    assert(0);
    return;
  } else if ((DWORD) got < hdr->dwBufferLength / bytes_per_frame(stm->params)) {
    r = waveOutUnprepareHeader(stm->waveout, hdr, sizeof(*hdr));
    assert(r == MMSYSERR_NOERROR);

    hdr->dwBufferLength = got * bytes_per_frame(stm->params);

    r = waveOutPrepareHeader(stm->waveout, hdr, sizeof(*hdr));
    assert(r == MMSYSERR_NOERROR);

    stm->draining = 1;
  }

  assert(hdr->dwFlags & WHDR_PREPARED);

  r = waveOutWrite(stm->waveout, hdr, sizeof(*hdr));
  assert(r == MMSYSERR_NOERROR);
}
static int
directsound_stream_get_position(cubeb_stream * stm, uint64_t * position)
{
  EnterCriticalSection(&stm->lock);

  DWORD play, write;
  HRESULT rv = stm->buffer->GetCurrentPosition(&play, &write);
  assert(rv == DS_OK);

  // XXX upper limit on position is stm->written,
  // XXX then adjust by overflow timer
  // XXX then adjust by play position

  unsigned long writepos = stm->written % stm->buffer_size;
  long space = play - writepos;
  if (space <= 0) {
    space += stm->buffer_size;
  }
  if (!stm->active) {
    space = 0;
  }
  long delay = stm->buffer_size - space;

  double pos = (double) ((stm->written - delay) / bytes_per_frame(stm->params)) /
               (double) stm->params.rate * 1000.0;
#if 1
  fprintf(stderr, "w=%lu space=%ld delay=%ld pos=%.2f (p=%u w=%u)\n",
          stm->written, space, delay, pos, play, write);
#endif

  *position = (stm->written - delay) / bytes_per_frame(stm->params);

  LeaveCriticalSection(&stm->lock);
  return CUBEB_OK;
}
static void
cubeb_refill_stream(cubeb_stream * stm)
{
  WAVEHDR * hdr;
  long got;
  long wanted;
  MMRESULT r;

  EnterCriticalSection(&stm->lock);
  stm->free_buffers += 1;
  assert(stm->free_buffers > 0 && stm->free_buffers <= NBUFS);

  if (stm->draining) {
    LeaveCriticalSection(&stm->lock);
    if (stm->free_buffers == NBUFS) {
      stm->state_callback(stm, stm->user_ptr, CUBEB_STATE_DRAINED);
    }
    SetEvent(stm->event);
    return;
  }

  if (stm->shutdown) {
    LeaveCriticalSection(&stm->lock);
    SetEvent(stm->event);
    return;
  }

  hdr = cubeb_get_next_buffer(stm);

  wanted = (DWORD) stm->buffer_size / bytes_per_frame(stm->params);

  /* It is assumed that the caller is holding this lock.  It must be dropped
     during the callback to avoid deadlocks. */
  LeaveCriticalSection(&stm->lock);
  got = stm->data_callback(stm, stm->user_ptr, hdr->lpData, wanted);
  EnterCriticalSection(&stm->lock);
  if (got < 0) {
    LeaveCriticalSection(&stm->lock);
    /* XXX handle this case */
    assert(0);
    return;
  } else if (got < wanted) {
    stm->draining = 1;
  }

  assert(hdr->dwFlags & WHDR_PREPARED);

  hdr->dwBufferLength = got * bytes_per_frame(stm->params);
  assert(hdr->dwBufferLength <= stm->buffer_size);

  r = waveOutWrite(stm->waveout, hdr, sizeof(*hdr));
  if (r != MMSYSERR_NOERROR) {
    LeaveCriticalSection(&stm->lock);
    stm->state_callback(stm, stm->user_ptr, CUBEB_STATE_ERROR);
    return;
  }

  LeaveCriticalSection(&stm->lock);
}
int frames_per_buffer(
	const media_raw_audio_format & format)
{
	// This will give us the number of full-sized frames that will fit
	// in a buffer.  (Remember, integer division automatically rounds
	// down.)
	int frames = 0;
	if (bytes_per_frame(format) > 0) {
		frames = format.buffer_size / bytes_per_frame(format);
	}
	return frames;
}
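frames_per_buffer() and buffer_duration() both lean on a bytes_per_frame() overload for media_raw_audio_format that is not reproduced here. A plausible sketch, assuming the sample size is encoded in the low nybble of the format constant (as B_AUDIO_SIZE_MASK suggests); the real helper may differ:

// Hypothetical sketch of the companion helper used above.
size_t bytes_per_frame(
	const media_raw_audio_format & format)
{
	// e.g. B_AUDIO_FLOAT (0x24) -> 4 bytes, B_AUDIO_SHORT (0x2) -> 2 bytes
	size_t bytesPerSample = format.format & media_raw_audio_format::B_AUDIO_SIZE_MASK;
	return bytesPerSample * format.channel_count;
}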
void AudioFilterNode::processBuffer( BBuffer* inputBuffer, BBuffer* outputBuffer) { ASSERT(inputBuffer); ASSERT(outputBuffer); ASSERT(m_op); // create wrapper objects AudioBuffer input(m_input.format.u.raw_audio, inputBuffer); AudioBuffer output(m_output.format.u.raw_audio, outputBuffer); double sourceOffset = 0.0; uint32 destinationOffset = 0L; // when is the first frame due to be consumed? bigtime_t startTime = outputBuffer->Header()->start_time; // when is the next frame to be produced going to be consumed? bigtime_t targetTime = startTime; // when will the first frame of the next buffer be consumed? bigtime_t endTime = startTime + BufferDuration(); uint32 framesRemaining = input.frames(); while(framesRemaining) { // handle all events occurring before targetTime // +++++ bigtime_t nextEventTime = endTime; // look for next event occurring before endTime // +++++ // process up to found event, if any, or to end of buffer int64 toProcess = frames_for_duration(output.format(), nextEventTime - targetTime); ASSERT(toProcess > 0); uint32 processed = m_op->process( input, output, sourceOffset, destinationOffset, (uint32)toProcess, targetTime); if(processed < toProcess) { // +++++ in offline mode this will have to request additional buffer(s), right? PRINT(( "*** AudioFilterNode::processBuffer(): insufficient frames filled\n")); } if(toProcess > framesRemaining) framesRemaining = 0; else framesRemaining -= toProcess; // advance target time targetTime = nextEventTime; // +++++ might this drift from the real frame offset? } outputBuffer->Header()->size_used = input.frames() * bytes_per_frame(m_output.format.u.raw_audio); // PRINT(("### output size: %ld\n", outputBuffer->Header()->size_used)); }
bigtime_t buffer_duration(
	const media_raw_audio_format & format)
{
	// Figuring out duration is easy.  We take extra precaution to
	// not divide by zero or return irrelevant results.
	bigtime_t duration = 0;
	if (format.buffer_size > 0 && format.frame_rate > 0
		&& bytes_per_frame(format) > 0) {
		// In these kinds of calculations, it's always useful to double-check
		// the unit conversions.  (Anyone remember high school physics?)
		//   bytes/(bytes/frame) / frames/sec
		// = frames * sec/frames
		// = secs                  which is what we want.
		duration = s_to_us((format.buffer_size / bytes_per_frame(format))
			/ format.frame_rate);
	}
	return duration;
}
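To sanity-check the unit analysis above, a worked example with hypothetical numbers:

// Example (hypothetical format): 44.1 kHz stereo float, buffer_size = 4096.
//   bytes_per_frame = 4 bytes/sample * 2 channels = 8 bytes/frame
//   frames          = 4096 / 8                    = 512 frames
//   duration        = s_to_us(512 / 44100.0)      ~= 11610 microseconds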
status_t _AudioAdapterNode::validateProposedOutputFormat( const media_format& preferredFormat, media_format& ioProposedFormat) { status_t err = _inherited::validateProposedOutputFormat( preferredFormat, ioProposedFormat); media_raw_audio_format& w = media_raw_audio_format::wildcard; if(input().source != media_source::null) { // an input connection exists; constrain the output format // is there enough information to suggest a buffer size? if( ioProposedFormat.u.raw_audio.format != w.format && ioProposedFormat.u.raw_audio.channel_count != w.channel_count) { size_t target_buffer_size = bytes_per_frame(ioProposedFormat.u.raw_audio) * frames_per_buffer(input().format.u.raw_audio); if(ioProposedFormat.u.raw_audio.buffer_size != target_buffer_size) { if(ioProposedFormat.u.raw_audio.buffer_size != w.buffer_size) err = B_MEDIA_BAD_FORMAT; ioProposedFormat.u.raw_audio.buffer_size = target_buffer_size; } } // require same frame rate as input if(ioProposedFormat.u.raw_audio.frame_rate != input().format.u.raw_audio.frame_rate) { if(ioProposedFormat.u.raw_audio.frame_rate != w.frame_rate) err = B_MEDIA_BAD_FORMAT; ioProposedFormat.u.raw_audio.frame_rate = input().format.u.raw_audio.frame_rate; } } char fmt_string[256]; string_for_format(ioProposedFormat, fmt_string, 255); PRINT(( "### _AudioAdapterNode::validateProposedOutputFormat():\n" " %s\n", fmt_string)); return err; }
status_t _AudioAdapterNode::getPreferredOutputFormat( media_format& ioFormat) { status_t err = _inherited::getPreferredOutputFormat(ioFormat); if(err < B_OK) return err; _AudioAdapterParams* p = dynamic_cast<_AudioAdapterParams*>(parameterSet()); ASSERT(p); media_raw_audio_format& w = media_raw_audio_format::wildcard; // copy user preferences if(p->outputFormat.format != w.format) ioFormat.u.raw_audio.format = p->outputFormat.format; if(p->outputFormat.channel_count != w.channel_count) ioFormat.u.raw_audio.channel_count = p->outputFormat.channel_count; //// // if one end is connected, prefer not to do channel conversions [15sep99] //// if(input().source != media_source::null) //// ioFormat.u.raw_audio.channel_count = input().format.u.raw_audio.channel_count; // if input connected, constrain: // buffer_size // frame_rate if(input().source != media_source::null) { // if the user doesn't care, default to the input's frame format if(ioFormat.u.raw_audio.format == w.format) ioFormat.u.raw_audio.format = input().format.u.raw_audio.format; if(ioFormat.u.raw_audio.channel_count == w.channel_count) ioFormat.u.raw_audio.channel_count = input().format.u.raw_audio.channel_count; ioFormat.u.raw_audio.buffer_size = bytes_per_frame(ioFormat.u.raw_audio) * frames_per_buffer(input().format.u.raw_audio); PRINT(("##### preferred output buffer_size: %ld (%x)\n", ioFormat.u.raw_audio.buffer_size, ioFormat.u.raw_audio.buffer_size)); ioFormat.u.raw_audio.frame_rate = input().format.u.raw_audio.frame_rate; } return B_OK; }
status_t _AudioAdapterNode::getPreferredInputFormat( media_format& ioFormat) { status_t err = _inherited::getPreferredInputFormat(ioFormat); if(err < B_OK) return err; _AudioAdapterParams* p = dynamic_cast<_AudioAdapterParams*>(parameterSet()); ASSERT(p); media_raw_audio_format& f = ioFormat.u.raw_audio; media_raw_audio_format& w = media_raw_audio_format::wildcard; // copy user preferences if(p->inputFormat.format != w.format) f.format = p->inputFormat.format; if(p->inputFormat.channel_count != w.channel_count) f.channel_count = p->inputFormat.channel_count; // // if one end is connected, prefer not to do channel conversions [15sep99] // if(output().destination != media_destination::null) // ioFormat.u.raw_audio.channel_count = output().format.u.raw_audio.channel_count; // if output connected, constrain: // buffer_size // frame_rate if(output().destination != media_destination::null) { // if the user doesn't care, default to the output's frame format if(f.format == w.format) f.format = output().format.u.raw_audio.format; if(f.channel_count == w.channel_count) f.channel_count = output().format.u.raw_audio.channel_count; f.buffer_size = bytes_per_frame(f) * frames_per_buffer(output().format.u.raw_audio); f.frame_rate = output().format.u.raw_audio.frame_rate; } return B_OK; }
int cubeb_stream_init(cubeb * context, cubeb_stream ** stream, char const * stream_name, cubeb_stream_params stream_params, unsigned int latency, cubeb_data_callback data_callback, cubeb_state_callback state_callback, void * user_ptr) { MMRESULT r; WAVEFORMATEXTENSIBLE wfx; cubeb_stream * stm; int i; size_t bufsz; assert(context); assert(stream); *stream = NULL; if (stream_params.rate < 1 || stream_params.rate > 192000 || stream_params.channels < 1 || stream_params.channels > 32 || latency < 1 || latency > 2000) { return CUBEB_ERROR_INVALID_FORMAT; } memset(&wfx, 0, sizeof(wfx)); if (stream_params.channels > 2) { wfx.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE; wfx.Format.cbSize = sizeof(wfx) - sizeof(wfx.Format); } else { wfx.Format.wFormatTag = WAVE_FORMAT_PCM; if (stream_params.format == CUBEB_SAMPLE_FLOAT32LE) { wfx.Format.wFormatTag = WAVE_FORMAT_IEEE_FLOAT; } wfx.Format.cbSize = 0; } wfx.Format.nChannels = stream_params.channels; wfx.Format.nSamplesPerSec = stream_params.rate; /* XXX fix channel mappings */ wfx.dwChannelMask = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT; switch (stream_params.format) { case CUBEB_SAMPLE_S16LE: wfx.Format.wBitsPerSample = 16; wfx.SubFormat = KSDATAFORMAT_SUBTYPE_PCM; break; case CUBEB_SAMPLE_FLOAT32LE: wfx.Format.wBitsPerSample = 32; wfx.SubFormat = KSDATAFORMAT_SUBTYPE_IEEE_FLOAT; break; default: return CUBEB_ERROR_INVALID_FORMAT; } wfx.Format.nBlockAlign = (wfx.Format.wBitsPerSample * wfx.Format.nChannels) / 8; wfx.Format.nAvgBytesPerSec = wfx.Format.nSamplesPerSec * wfx.Format.nBlockAlign; wfx.Samples.wValidBitsPerSample = 0; wfx.Samples.wSamplesPerBlock = 0; wfx.Samples.wReserved = 0; EnterCriticalSection(&context->lock); /* CUBEB_STREAM_MAX is a horrible hack to avoid a situation where, when many streams are active at once, a subset of them will not consume (via playback) or release (via waveOutReset) their buffers. */ if (context->active_streams >= CUBEB_STREAM_MAX) { LeaveCriticalSection(&context->lock); return CUBEB_ERROR; } context->active_streams += 1; LeaveCriticalSection(&context->lock); stm = calloc(1, sizeof(*stm)); assert(stm); stm->context = context; stm->params = stream_params; stm->data_callback = data_callback; stm->state_callback = state_callback; stm->user_ptr = user_ptr; bufsz = (size_t) (stm->params.rate / 1000.0 * latency * bytes_per_frame(stm->params) / NBUFS); if (bufsz % bytes_per_frame(stm->params) != 0) { bufsz += bytes_per_frame(stm->params) - (bufsz % bytes_per_frame(stm->params)); } assert(bufsz % bytes_per_frame(stm->params) == 0); stm->buffer_size = bufsz; InitializeCriticalSection(&stm->lock); stm->event = CreateEvent(NULL, FALSE, FALSE, NULL); if (!stm->event) { cubeb_stream_destroy(stm); return CUBEB_ERROR; } /* cubeb_buffer_callback will be called during waveOutOpen, so all other initialization must be complete before calling it. */ r = waveOutOpen(&stm->waveout, WAVE_MAPPER, &wfx.Format, (DWORD_PTR) cubeb_buffer_callback, (DWORD_PTR) stm, CALLBACK_FUNCTION); if (r != MMSYSERR_NOERROR) { cubeb_stream_destroy(stm); return CUBEB_ERROR; } r = waveOutPause(stm->waveout); if (r != MMSYSERR_NOERROR) { cubeb_stream_destroy(stm); return CUBEB_ERROR; } for (i = 0; i < NBUFS; ++i) { WAVEHDR * hdr = &stm->buffers[i]; hdr->lpData = calloc(1, bufsz); assert(hdr->lpData); hdr->dwBufferLength = bufsz; hdr->dwFlags = 0; r = waveOutPrepareHeader(stm->waveout, hdr, sizeof(*hdr)); if (r != MMSYSERR_NOERROR) { cubeb_stream_destroy(stm); return CUBEB_ERROR; } cubeb_refill_stream(stm); } *stream = stm; return CUBEB_OK; }
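All of the size arithmetic in these WinMM/DirectSound backends goes through a bytes_per_frame(params) helper that is not reproduced in this excerpt. A sketch of the obvious implementation, assuming only the two sample formats handled above:

/* Hypothetical sketch of the frame-size helper assumed throughout. */
static size_t
bytes_per_frame(cubeb_stream_params params)
{
  size_t bytes;

  switch (params.format) {
  case CUBEB_SAMPLE_S16LE:
    bytes = sizeof(signed short);
    break;
  case CUBEB_SAMPLE_FLOAT32LE:
    bytes = sizeof(float);
    break;
  default:
    assert(0);
  }

  return bytes * params.channels;
}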
static int winmm_stream_init(cubeb * context, cubeb_stream ** stream, char const * stream_name, cubeb_devid input_device, cubeb_stream_params * input_stream_params, cubeb_devid output_device, cubeb_stream_params * output_stream_params, unsigned int latency_frames, cubeb_data_callback data_callback, cubeb_state_callback state_callback, void * user_ptr) { MMRESULT r; WAVEFORMATEXTENSIBLE wfx; cubeb_stream * stm; int i; size_t bufsz; XASSERT(context); XASSERT(stream); if (input_stream_params) { /* Capture support not yet implemented. */ return CUBEB_ERROR_NOT_SUPPORTED; } if (input_device || output_device) { /* Device selection not yet implemented. */ return CUBEB_ERROR_DEVICE_UNAVAILABLE; } *stream = NULL; memset(&wfx, 0, sizeof(wfx)); if (output_stream_params->channels > 2) { wfx.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE; wfx.Format.cbSize = sizeof(wfx) - sizeof(wfx.Format); } else { wfx.Format.wFormatTag = WAVE_FORMAT_PCM; if (output_stream_params->format == CUBEB_SAMPLE_FLOAT32LE) { wfx.Format.wFormatTag = WAVE_FORMAT_IEEE_FLOAT; } wfx.Format.cbSize = 0; } wfx.Format.nChannels = output_stream_params->channels; wfx.Format.nSamplesPerSec = output_stream_params->rate; /* XXX fix channel mappings */ wfx.dwChannelMask = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT; switch (output_stream_params->format) { case CUBEB_SAMPLE_S16LE: wfx.Format.wBitsPerSample = 16; wfx.SubFormat = KSDATAFORMAT_SUBTYPE_PCM; break; case CUBEB_SAMPLE_FLOAT32LE: wfx.Format.wBitsPerSample = 32; wfx.SubFormat = KSDATAFORMAT_SUBTYPE_IEEE_FLOAT; break; default: return CUBEB_ERROR_INVALID_FORMAT; } wfx.Format.nBlockAlign = (wfx.Format.wBitsPerSample * wfx.Format.nChannels) / 8; wfx.Format.nAvgBytesPerSec = wfx.Format.nSamplesPerSec * wfx.Format.nBlockAlign; wfx.Samples.wValidBitsPerSample = wfx.Format.wBitsPerSample; EnterCriticalSection(&context->lock); /* CUBEB_STREAM_MAX is a horrible hack to avoid a situation where, when many streams are active at once, a subset of them will not consume (via playback) or release (via waveOutReset) their buffers. */ if (context->active_streams >= CUBEB_STREAM_MAX) { LeaveCriticalSection(&context->lock); return CUBEB_ERROR; } context->active_streams += 1; LeaveCriticalSection(&context->lock); stm = calloc(1, sizeof(*stm)); XASSERT(stm); stm->context = context; stm->params = *output_stream_params; stm->data_callback = data_callback; stm->state_callback = state_callback; stm->user_ptr = user_ptr; stm->written = 0; uint32_t latency_ms = latency_frames * 1000 / output_stream_params->rate; if (latency_ms < context->minimum_latency_ms) { latency_ms = context->minimum_latency_ms; } bufsz = (size_t) (stm->params.rate / 1000.0 * latency_ms * bytes_per_frame(stm->params) / NBUFS); if (bufsz % bytes_per_frame(stm->params) != 0) { bufsz += bytes_per_frame(stm->params) - (bufsz % bytes_per_frame(stm->params)); } XASSERT(bufsz % bytes_per_frame(stm->params) == 0); stm->buffer_size = bufsz; InitializeCriticalSection(&stm->lock); stm->event = CreateEvent(NULL, FALSE, FALSE, NULL); if (!stm->event) { winmm_stream_destroy(stm); return CUBEB_ERROR; } stm->soft_volume = -1.0; /* winmm_buffer_callback will be called during waveOutOpen, so all other initialization must be complete before calling it. 
*/ r = waveOutOpen(&stm->waveout, WAVE_MAPPER, &wfx.Format, (DWORD_PTR) winmm_buffer_callback, (DWORD_PTR) stm, CALLBACK_FUNCTION); if (r != MMSYSERR_NOERROR) { winmm_stream_destroy(stm); return CUBEB_ERROR; } r = waveOutPause(stm->waveout); if (r != MMSYSERR_NOERROR) { winmm_stream_destroy(stm); return CUBEB_ERROR; } for (i = 0; i < NBUFS; ++i) { WAVEHDR * hdr = &stm->buffers[i]; hdr->lpData = calloc(1, bufsz); XASSERT(hdr->lpData); hdr->dwBufferLength = bufsz; hdr->dwFlags = 0; r = waveOutPrepareHeader(stm->waveout, hdr, sizeof(*hdr)); if (r != MMSYSERR_NOERROR) { winmm_stream_destroy(stm); return CUBEB_ERROR; } winmm_refill_stream(stm); } *stream = stm; return CUBEB_OK; }
static void winmm_refill_stream(cubeb_stream * stm) { WAVEHDR * hdr; long got; long wanted; MMRESULT r; EnterCriticalSection(&stm->lock); stm->free_buffers += 1; XASSERT(stm->free_buffers > 0 && stm->free_buffers <= NBUFS); if (stm->draining) { LeaveCriticalSection(&stm->lock); if (stm->free_buffers == NBUFS) { stm->state_callback(stm, stm->user_ptr, CUBEB_STATE_DRAINED); } SetEvent(stm->event); return; } if (stm->shutdown) { LeaveCriticalSection(&stm->lock); SetEvent(stm->event); return; } hdr = winmm_get_next_buffer(stm); wanted = (DWORD) stm->buffer_size / bytes_per_frame(stm->params); /* It is assumed that the caller is holding this lock. It must be dropped during the callback to avoid deadlocks. */ LeaveCriticalSection(&stm->lock); got = stm->data_callback(stm, stm->user_ptr, NULL, hdr->lpData, wanted); EnterCriticalSection(&stm->lock); if (got < 0) { LeaveCriticalSection(&stm->lock); /* XXX handle this case */ XASSERT(0); return; } else if (got < wanted) { stm->draining = 1; } stm->written += got; XASSERT(hdr->dwFlags & WHDR_PREPARED); hdr->dwBufferLength = got * bytes_per_frame(stm->params); XASSERT(hdr->dwBufferLength <= stm->buffer_size); if (stm->soft_volume != -1.0) { if (stm->params.format == CUBEB_SAMPLE_FLOAT32NE) { float * b = (float *) hdr->lpData; uint32_t i; for (i = 0; i < got * stm->params.channels; i++) { b[i] *= stm->soft_volume; } } else { short * b = (short *) hdr->lpData; uint32_t i; for (i = 0; i < got * stm->params.channels; i++) { b[i] = (short) (b[i] * stm->soft_volume); } } } r = waveOutWrite(stm->waveout, hdr, sizeof(*hdr)); if (r != MMSYSERR_NOERROR) { LeaveCriticalSection(&stm->lock); stm->state_callback(stm, stm->user_ptr, CUBEB_STATE_ERROR); return; } LeaveCriticalSection(&stm->lock); }
static void
refill_stream(cubeb_stream * stm, int prefill)
{
  VOID * p1, * p2;
  DWORD p1sz, p2sz;
  HRESULT rv;
  long dt;

  /* calculate how much has played since last refill */
  DWORD play, write;
  rv = stm->buffer->GetCurrentPosition(&play, &write);
  assert(rv == DS_OK);

  long gap = write - play;
  if (gap < 0) {
    gap += stm->buffer_size;
  }

#if 1
  dt = GetTickCount() - stm->last_refill;
  if (!prefill) {
    double buflen = (double) (stm->buffer_size - gap) / bytes_per_frame(stm->params) /
                    stm->params.rate * 1000;
    if (dt > buflen) {
      fprintf(stderr, "*** buffer wrap (%ld, %f, %f)***\n", dt, buflen, dt - buflen);
      stm->slipped += (dt - buflen) / 1000.0 * bytes_per_frame(stm->params) * stm->params.rate;
    }
  }
#endif

  unsigned long writepos = stm->written % stm->buffer_size;

  long playsz = 0;
  if (write < writepos) {
    playsz = write + stm->buffer_size - writepos;
  } else {
    playsz = write - writepos;
  }

  /* can't write between play and write cursors */
  playsz -= gap;
  if (playsz < 0) {
#if 0
    fprintf(stderr, "** negcapped, dt=%u real nwl=%ld p=%u w=%u g=%ld wo=%u **\n",
            dt, playsz, play, write, gap, writepos);
#endif
    return;
  }

  if (prefill) {
    playsz = stm->buffer_size;
  }
  playsz -= bytes_per_frame(stm->params);

  /* no space to refill */
  if (playsz <= 0)
    return;

  /*assert(writepos >= write && ((writepos + playsz) % stm->buffer_size) < play);*/

  /* assumptions: buffer with w==p is full or empty
     we know total writes is stm->written
     so w==p and stm->written%stm->buffer_size==0 full or empty
     need abs play pos to determine

     rel play pos is (write + stm->buffer_size) - play
     (0 + 10) - 0 -> 10 -> also assumes buffer is full

     absplayed must be between stm->written-stm->buffer_size and stm->written.

     XXX want prefill logic to work anytime as we will eventually call it from start()
  */

  rv = stm->buffer->Lock(writepos, playsz, &p1, &p1sz, &p2, &p2sz, 0);
  if (rv == DSERR_BUFFERLOST) {
    stm->buffer->Restore();
    rv = stm->buffer->Lock(writepos, playsz, &p1, &p1sz, &p2, &p2sz, 0);
  }
  assert(rv == DS_OK);

  assert(p1sz % bytes_per_frame(stm->params) == 0);
  assert(p2sz % bytes_per_frame(stm->params) == 0);

  int r = stm->data_callback(stm, stm->user_ptr, p1, p1sz / bytes_per_frame(stm->params));
  if (p2 && r == CUBEB_OK) {
    r = stm->data_callback(stm, stm->user_ptr, p2, p2sz / bytes_per_frame(stm->params));
  } else {
    p2sz = 0;
  }

#if 0
  // XXX fix EOS/drain handling
  if (r == CUBEB_EOS) {
    LPDIRECTSOUNDNOTIFY notify;
    rv = stm->buffer->QueryInterface(IID_IDirectSoundNotify, (LPVOID *) &notify);
    assert(rv == DS_OK);

    DSBPOSITIONNOTIFY note;
    note.dwOffset = (writepos + p1sz + p2sz) % stm->buffer_size;
    note.hEventNotify = stm->context->streams_event;
    if (notify->SetNotificationPositions(1, &note) != DS_OK) {
      /* XXX free resources */
      assert(false);
    }

    notify->Release();

    stm->draining = 1;
  }
#endif

  stm->last_refill = GetTickCount();

  stm->written += p1sz + p2sz;
  rv = stm->buffer->Unlock(p1, p1sz, p2, p2sz);
  assert(rv == DS_OK);
}
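The comment block above argues that the absolute play position must lie in (stm->written - stm->buffer_size, stm->written]. A small sketch of how that invariant could be used to unwrap the play cursor, assuming the buffer has already been prefilled (absolute_play_position is a hypothetical helper, not part of this backend):

/* Hypothetical sketch: recover an absolute play position (in bytes) from the
   wrapped cursor, using the invariant that it lies in
   (written - buffer_size, written]. */
static int64_t
absolute_play_position(int64_t written, int64_t buffer_size, DWORD play)
{
  int64_t abs_play = written - (written % buffer_size) + play;
  if (abs_play > written) {
    /* the play cursor is still in the previous lap of the ring */
    abs_play -= buffer_size;
  }
  return abs_play;
}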
CString DSoundBuf::Init( DSound &ds, DSoundBuf::hw hardware, int iChannels, int iSampleRate, int iSampleBits, int iWriteAhead ) { m_iChannels = iChannels; m_iSampleRate = iSampleRate; m_iSampleBits = iSampleBits; m_iWriteAhead = iWriteAhead * bytes_per_frame(); m_iVolume = -1; /* unset */ m_bBufferLocked = false; m_iWriteCursorPos = m_iWriteCursor = m_iBufferBytesFilled = 0; m_iExtraWriteahead = 0; m_iLastPosition = 0; m_bPlaying = false; ZERO( m_iLastCursors ); /* The size of the actual DSound buffer. This can be large; we generally * won't fill it completely. */ m_iBufferSize = 1024*64; m_iBufferSize = max( m_iBufferSize, m_iWriteAhead ); WAVEFORMATEX waveformat; memset( &waveformat, 0, sizeof(waveformat) ); waveformat.cbSize = 0; waveformat.wFormatTag = WAVE_FORMAT_PCM; bool NeedCtrlFrequency = false; if( m_iSampleRate == DYNAMIC_SAMPLERATE ) { m_iSampleRate = 44100; NeedCtrlFrequency = true; } int bytes = m_iSampleBits / 8; waveformat.wBitsPerSample = WORD(m_iSampleBits); waveformat.nChannels = WORD(m_iChannels); waveformat.nSamplesPerSec = DWORD(m_iSampleRate); waveformat.nBlockAlign = WORD(bytes*m_iChannels); waveformat.nAvgBytesPerSec = m_iSampleRate * bytes*m_iChannels; /* Try to create the secondary buffer */ DSBUFFERDESC format; memset( &format, 0, sizeof(format) ); format.dwSize = sizeof(format); #ifdef _XBOX format.dwFlags = 0; #else format.dwFlags = DSBCAPS_GETCURRENTPOSITION2 | DSBCAPS_GLOBALFOCUS | DSBCAPS_CTRLVOLUME; #endif #ifndef _XBOX /* Don't use DSBCAPS_STATIC. It's meant for static buffers, and we * only use streaming buffers. */ if( hardware == HW_HARDWARE ) format.dwFlags |= DSBCAPS_LOCHARDWARE; else format.dwFlags |= DSBCAPS_LOCSOFTWARE; #endif if( NeedCtrlFrequency ) format.dwFlags |= DSBCAPS_CTRLFREQUENCY; format.dwBufferBytes = m_iBufferSize; #ifndef _XBOX format.dwReserved = 0; #else DSMIXBINVOLUMEPAIR dsmbvp[8] = { { DSMIXBIN_FRONT_LEFT, DSBVOLUME_MAX }, // left channel { DSMIXBIN_FRONT_RIGHT, DSBVOLUME_MAX }, // right channel { DSMIXBIN_FRONT_CENTER, DSBVOLUME_MAX }, // left channel { DSMIXBIN_FRONT_CENTER, DSBVOLUME_MAX }, // right channel { DSMIXBIN_BACK_LEFT, DSBVOLUME_MAX }, // left channel { DSMIXBIN_BACK_RIGHT, DSBVOLUME_MAX }, // right channel { DSMIXBIN_LOW_FREQUENCY, DSBVOLUME_MAX }, // left channel { DSMIXBIN_LOW_FREQUENCY, DSBVOLUME_MAX } // right channel }; DSMIXBINS dsmb; dsmb.dwMixBinCount = 8; dsmb.lpMixBinVolumePairs = dsmbvp; format.lpMixBins = &dsmb; #endif format.lpwfxFormat = &waveformat; HRESULT hr = ds.GetDS()->CreateSoundBuffer( &format, &m_pBuffer, NULL ); if( FAILED(hr) ) return hr_ssprintf( hr, "CreateSoundBuffer failed" ); #ifndef _XBOX /* I'm not sure this should ever be needed, but ... 
*/ DSBCAPS bcaps; bcaps.dwSize=sizeof(bcaps); hr = m_pBuffer->GetCaps( &bcaps ); if( FAILED(hr) ) return hr_ssprintf( hr, "m_pBuffer->GetCaps" ); if( int(bcaps.dwBufferBytes) != m_iBufferSize ) { LOG->Warn( "bcaps.dwBufferBytes (%i) != m_iBufferSize(%i); adjusting", bcaps.dwBufferBytes, m_iBufferSize ); m_iBufferSize = bcaps.dwBufferBytes; m_iWriteAhead = min( m_iWriteAhead, m_iBufferSize ); } if( !(bcaps.dwFlags & DSBCAPS_CTRLVOLUME) ) LOG->Warn( "Sound channel missing DSBCAPS_CTRLVOLUME" ); if( !(bcaps.dwFlags & DSBCAPS_GETCURRENTPOSITION2) ) LOG->Warn( "Sound channel missing DSBCAPS_GETCURRENTPOSITION2" ); DWORD got; hr = m_pBuffer->GetFormat( &waveformat, sizeof(waveformat), &got ); if( FAILED(hr) ) LOG->Warn( hr_ssprintf(hr, "GetFormat on secondary buffer") ); else if( (int) waveformat.nSamplesPerSec != m_iSampleRate ) LOG->Warn( "Secondary buffer set to %i instead of %i", waveformat.nSamplesPerSec, m_iSampleRate ); #endif m_pTempBuffer = new char[m_iBufferSize]; return ""; }
int S9xMovieOpen (const char* filename, bool8 read_only, uint8 sync_flags, uint8 sync_flags2) { FILE* fd; STREAM stream; int result; int fn; char movie_filename [_MAX_PATH]; #ifdef WIN32 _fullpath(movie_filename, filename, _MAX_PATH); #else strcpy(movie_filename, filename); #endif if(!(fd=fopen(movie_filename, "rb+"))) if(!(fd=fopen(movie_filename, "rb"))) return FILE_NOT_FOUND; else read_only = TRUE; const bool8 wasPaused = Settings.Paused; const uint32 prevFrameTime = Settings.FrameTime; // stop current movie before opening change_state(MOVIE_STATE_NONE); // read header if((result=read_movie_header(fd, &Movie))!=SUCCESS) { fclose(fd); return result; } read_movie_extrarominfo(fd, &Movie); fn=dup(fileno(fd)); fclose(fd); // apparently this lseek is necessary lseek(fn, Movie.SaveStateOffset, SEEK_SET); if(!(stream=REOPEN_STREAM(fn, "rb"))) return FILE_NOT_FOUND; // store previous, before changing to the movie's settings store_previous_settings(); // store default if (sync_flags & MOVIE_SYNC_DATA_EXISTS) { Settings.UseWIPAPUTiming = (sync_flags & MOVIE_SYNC_WIP1TIMING) ? TRUE : FALSE; Settings.SoundEnvelopeHeightReading = (sync_flags & MOVIE_SYNC_VOLUMEENVX) ? TRUE : FALSE; Settings.FakeMuteFix = (sync_flags & MOVIE_SYNC_FAKEMUTE) ? TRUE : FALSE; Settings.UpAndDown = (sync_flags & MOVIE_SYNC_LEFTRIGHT) ? TRUE : FALSE; // doesn't actually affect synchronization Settings.SoundSync = (sync_flags & MOVIE_SYNC_SYNCSOUND) ? TRUE : FALSE; // doesn't seem to affect synchronization Settings.InitFastROMSetting = (sync_flags2 & MOVIE_SYNC2_INIT_FASTROM) ? TRUE : FALSE; //Settings.ShutdownMaster = (sync_flags & MOVIE_SYNC_NOCPUSHUTDOWN) ? FALSE : TRUE; } // set from movie restore_movie_settings(); if(Movie.Opts & MOVIE_OPT_FROM_RESET) { Movie.State = MOVIE_STATE_PLAY; // prevent NSRT controller switching (in S9xPostRomInit) if(!Memory.LoadLastROM()) S9xReset(); Memory.ClearSRAM(false); // in case the SRAM read fails Movie.State = MOVIE_STATE_NONE; // save only SRAM for a from-reset snapshot result=(READ_STREAM(Memory.SRAM, 0x20000, stream) == 0x20000) ? SUCCESS : WRONG_FORMAT; } else { result=S9xUnfreezeFromStream(stream); } CLOSE_STREAM(stream); if(result!=SUCCESS) { return result; } if(!(fd=fopen(movie_filename, "rb+"))) if(!(fd=fopen(movie_filename, "rb"))) return FILE_NOT_FOUND; else read_only = TRUE; if(fseek(fd, Movie.ControllerDataOffset, SEEK_SET)) return WRONG_FORMAT; // read controller data Movie.File=fd; Movie.BytesPerFrame=bytes_per_frame(); Movie.InputBufferPtr=Movie.InputBuffer; uint32 to_read=Movie.BytesPerFrame * (Movie.MaxFrame+1); reserve_buffer_space(to_read); fread(Movie.InputBufferPtr, 1, to_read, fd); // read "baseline" controller data if(Movie.MaxFrame) read_frame_controller_data(); strncpy(Movie.Filename, movie_filename, _MAX_PATH); Movie.Filename[_MAX_PATH-1]='\0'; Movie.CurrentFrame=0; Movie.ReadOnly=read_only; change_state(MOVIE_STATE_PLAY); Settings.Paused = wasPaused; Settings.FrameTime = prevFrameTime; // restore emulation speed Movie.RecordedThisSession = false; S9xUpdateFrameCounter(-1); Movie.RequiresReset = false; S9xMessage(S9X_INFO, S9X_MOVIE_INFO, MOVIE_INFO_REPLAY); return SUCCESS; }
bool DSoundBuf::get_output_buf( char **pBuffer, unsigned *pBufferSize, int iChunksize ) { ASSERT( !m_bBufferLocked ); iChunksize *= bytes_per_frame(); DWORD iCursorStart, iCursorEnd; HRESULT result; /* It's easiest to think of the cursor as a block, starting and ending at * the two values returned by GetCurrentPosition, that we can't write to. */ result = m_pBuffer->GetCurrentPosition( &iCursorStart, &iCursorEnd ); #ifndef _XBOX if( result == DSERR_BUFFERLOST ) { m_pBuffer->Restore(); result = m_pBuffer->GetCurrentPosition( &iCursorStart, &iCursorEnd ); } if( result != DS_OK ) { LOG->Warn( hr_ssprintf(result, "DirectSound::GetCurrentPosition failed") ); return false; } #endif memmove( &m_iLastCursors[0][0], &m_iLastCursors[1][0], sizeof(int)*6 ); m_iLastCursors[3][0] = iCursorStart; m_iLastCursors[3][1] = iCursorEnd; /* Some cards (Creative AudioPCI) have a no-write area even when not playing. I'm not * sure what that means, but it breaks the assumption that we can fill the whole writeahead * when prebuffering. */ if( !m_bPlaying ) iCursorEnd = iCursorStart; /* * Some cards (Game Theater XP 7.1 hercwdm.sys 5.12.01.4101 [466688b, 01-10-2003]) * have odd behavior when starting a sound: the start/end cursors go: * * 0,0 end cursor forced equal to start above (normal) * 4608, 1764 end cursor trailing the write cursor; except with old emulated * WaveOut devices, this shouldn't happen; it indicates that the * driver expects almost the whole buffer to be filled. Also, the * play cursor is too far ahead from the last call for the amount * of actual time passed. * 704, XXX start cursor moves back to where it should be. I don't have an exact * end cursor position, but in general from now on it stays about 5kb * ahead of start (which is where it should be). * * The second call is completely wrong; both the start and end cursors are meaningless. * Detect this: if the end cursor is close behind the start cursor, don't do anything. * (We can't; we have no idea what the cursors actually are.) */ { int iPrefetch = iCursorEnd - iCursorStart; wrap( iPrefetch, m_iBufferSize ); if( m_iBufferSize - iPrefetch < 1024*4 ) { LOG->Trace( "Strange DirectSound cursor ignored: %i..%i", iCursorStart, iCursorEnd ); return false; } } /* Update m_iBufferBytesFilled. */ { int iFirstByteFilled = m_iWriteCursor - m_iBufferBytesFilled; wrap( iFirstByteFilled, m_iBufferSize ); /* The number of bytes that have been played since the last time we got here: */ int bytes_played = iCursorStart - iFirstByteFilled; wrap( bytes_played, m_iBufferSize ); m_iBufferBytesFilled -= bytes_played; m_iBufferBytesFilled = max( 0, m_iBufferBytesFilled ); if( m_iExtraWriteahead ) { int used = min( m_iExtraWriteahead, bytes_played ); CString s = ssprintf("used %i of %i (%i..%i)", used, m_iExtraWriteahead, iCursorStart, iCursorEnd ); s += "; last: "; for( int i = 0; i < 4; ++i ) s += ssprintf( "%i, %i; ", m_iLastCursors[i][0], m_iLastCursors[i][1] ); LOG->Trace("%s", s.c_str()); m_iWriteAhead -= used; m_iExtraWriteahead -= used; } } CheckWriteahead( iCursorStart, iCursorEnd ); CheckUnderrun( iCursorStart, iCursorEnd ); /* If we already have enough bytes written ahead, stop. */ if( m_iBufferBytesFilled > m_iWriteAhead ) return false; int iNumBytesEmpty = m_iWriteAhead - m_iBufferBytesFilled; /* num_bytes_empty is the amount of free buffer space. If it's * too small, come back later. 
*/ if( iNumBytesEmpty < iChunksize ) return false; // LOG->Trace("gave %i at %i (%i, %i) %i filled", iNumBytesEmpty, m_iWriteCursor, cursor, write, m_iBufferBytesFilled); /* Lock the audio buffer. */ result = m_pBuffer->Lock( m_iWriteCursor, iNumBytesEmpty, (LPVOID *) &m_pLockedBuf1, (DWORD *) &m_iLockedSize1, (LPVOID *) &m_pLockedBuf2, (DWORD *) &m_iLockedSize2, 0 ); #ifndef _XBOX if( result == DSERR_BUFFERLOST ) { m_pBuffer->Restore(); result = m_pBuffer->Lock( m_iWriteCursor, iNumBytesEmpty, (LPVOID *) &m_pLockedBuf1, (DWORD *) &m_iLockedSize1, (LPVOID *) &m_pLockedBuf2, (DWORD *) &m_iLockedSize2, 0 ); } #endif if( result != DS_OK ) { LOG->Warn( hr_ssprintf(result, "Couldn't lock the DirectSound buffer.") ); return false; } *pBuffer = m_pTempBuffer; *pBufferSize = m_iLockedSize1 + m_iLockedSize2; m_iWriteCursor += iNumBytesEmpty; if( m_iWriteCursor >= m_iBufferSize ) m_iWriteCursor -= m_iBufferSize; m_iBufferBytesFilled += iNumBytesEmpty; m_iWriteCursorPos += iNumBytesEmpty / bytes_per_frame(); m_bBufferLocked = true; return true; }
// create or discard buffer group if necessary
void AudioFilterNode::updateBufferGroup() {

	status_t err;

	size_t inputSize = bytes_per_frame(m_input.format.u.raw_audio);
	size_t outputSize = bytes_per_frame(m_output.format.u.raw_audio);

	if(m_input.source == media_source::null ||
		m_output.destination == media_destination::null ||
		inputSize >= outputSize) {

		PRINT(("###### NO BUFFER GROUP NEEDED\n"));

		// no internal buffer group needed
		if(m_bufferGroup) {
			// does this block? +++++
			delete m_bufferGroup;
			m_bufferGroup = 0;
		}
		return;
	}

	int32 bufferCount = EventLatency() / BufferDuration() + 1 + 1;

	// +++++
	// [e.moon 27sep99] this is a reasonable number of buffers,
	// but it fails with looped file-player node in BeOS 4.5.2.
	//
	if(bufferCount < 5)
		bufferCount = 5;
//	if(bufferCount < 3)
//		bufferCount = 3;

	if(m_bufferGroup) {

		// is the current group sufficient?
		int32 curBufferCount;
		err = m_bufferGroup->CountBuffers(&curBufferCount);
		if(err == B_OK && curBufferCount >= bufferCount) {
			BBuffer* buf = m_bufferGroup->RequestBuffer(
				outputSize, -1);

			if(buf) {
				// yup
				buf->Recycle();
				return;
			}
		}

		// nope, delete it to make way for the new one
		delete m_bufferGroup;
		m_bufferGroup = 0;
	}

	// create buffer group
	PRINT((
		"##### AudioFilterNode::updateBufferGroup():\n"
		"##### creating %ld buffers of size %ld\n",
		bufferCount, m_output.format.u.raw_audio.buffer_size));

	m_bufferGroup = new BBufferGroup(
		m_output.format.u.raw_audio.buffer_size,
		bufferCount);
}
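For a sense of scale of the bufferCount formula above, a worked example with hypothetical latencies:

// Example (hypothetical values): EventLatency() = 50000 us and
// BufferDuration() = 10000 us give 50000/10000 + 1 + 1 = 7 buffers.
// With a 25000 us latency the formula yields 4, which the
// minimum-of-5 clamp raises to 5.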
void MixerCore::_MixThread() { // The broken BeOS R5 multiaudio node starts with time 0, // then publishes negative times for about 50ms, publishes 0 // again until it finally reaches time values > 0 if (!LockFromMixThread()) return; bigtime_t start = fTimeSource->Now(); Unlock(); while (start <= 0) { TRACE("MixerCore: delaying _MixThread start, timesource is at %Ld\n", start); snooze(5000); if (!LockFromMixThread()) return; start = fTimeSource->Now(); Unlock(); } if (!LockFromMixThread()) return; bigtime_t latency = max((bigtime_t)3600, bigtime_t(0.4 * buffer_duration( fOutput->MediaOutput().format.u.raw_audio))); // TODO: when the format changes while running, everything is wrong! bigtime_t bufferRequestTimeout = buffer_duration( fOutput->MediaOutput().format.u.raw_audio) / 2; TRACE("MixerCore: starting _MixThread at %Ld with latency %Ld and " "downstream latency %Ld, bufferRequestTimeout %Ld\n", start, latency, fDownstreamLatency, bufferRequestTimeout); // We must read from the input buffer at a position (pos) that is always // a multiple of fMixBufferFrameCount. int64 temp = frames_for_duration(fMixBufferFrameRate, start); int64 frameBase = ((temp / fMixBufferFrameCount) + 1) * fMixBufferFrameCount; bigtime_t timeBase = duration_for_frames(fMixBufferFrameRate, frameBase); Unlock(); TRACE("MixerCore: starting _MixThread, start %Ld, timeBase %Ld, " "frameBase %Ld\n", start, timeBase, frameBase); ASSERT(fMixBufferFrameCount > 0); #if DEBUG uint64 bufferIndex = 0; #endif typedef RtList<chan_info> chan_info_list; chan_info_list inputChanInfos[MAX_CHANNEL_TYPES]; BStackOrHeapArray<chan_info_list, 16> mixChanInfos(fMixBufferChannelCount); // TODO: this does not support changing output channel count bigtime_t eventTime = timeBase; int64 framePos = 0; for (;;) { if (!LockFromMixThread()) return; bigtime_t waitUntil = fTimeSource->RealTimeFor(eventTime, 0) - latency - fDownstreamLatency; Unlock(); status_t rv = acquire_sem_etc(fMixThreadWaitSem, 1, B_ABSOLUTE_TIMEOUT, waitUntil); if (rv == B_INTERRUPTED) continue; if (rv != B_TIMED_OUT && rv < B_OK) return; if (!LockWithTimeout(10000)) { ERROR("MixerCore: LockWithTimeout failed\n"); continue; } // no inputs or output muted, skip further processing and just send an // empty buffer if (fInputs->IsEmpty() || fOutput->IsMuted()) { int size = fOutput->MediaOutput().format.u.raw_audio.buffer_size; BBuffer* buffer = fBufferGroup->RequestBuffer(size, bufferRequestTimeout); if (buffer != NULL) { memset(buffer->Data(), 0, size); // fill in the buffer header media_header* hdr = buffer->Header(); hdr->type = B_MEDIA_RAW_AUDIO; hdr->size_used = size; hdr->time_source = fTimeSource->ID(); hdr->start_time = eventTime; if (fNode->SendBuffer(buffer, fOutput) != B_OK) { #if DEBUG ERROR("MixerCore: SendBuffer failed for buffer %Ld\n", bufferIndex); #else ERROR("MixerCore: SendBuffer failed\n"); #endif buffer->Recycle(); } } else { #if DEBUG ERROR("MixerCore: RequestBuffer failed for buffer %Ld\n", bufferIndex); #else ERROR("MixerCore: RequestBuffer failed\n"); #endif } goto schedule_next_event; } int64 currentFramePos; currentFramePos = frameBase + framePos; // mix all data from all inputs into the mix buffer ASSERT(currentFramePos % fMixBufferFrameCount == 0); PRINT(4, "create new buffer event at %Ld, reading input frames at " "%Ld\n", eventTime, currentFramePos); // Init the channel information for each MixerInput. 
for (int i = 0; MixerInput* input = Input(i); i++) { int count = input->GetMixerChannelCount(); for (int channel = 0; channel < count; channel++) { int type; const float* base; uint32 sampleOffset; float gain; if (!input->GetMixerChannelInfo(channel, currentFramePos, eventTime, &base, &sampleOffset, &type, &gain)) { continue; } if (type < 0 || type >= MAX_CHANNEL_TYPES) continue; chan_info* info = inputChanInfos[type].Create(); info->base = (const char*)base; info->sample_offset = sampleOffset; info->gain = gain; } } for (int channel = 0; channel < fMixBufferChannelCount; channel++) { int sourceCount = fOutput->GetOutputChannelSourceCount(channel); for (int i = 0; i < sourceCount; i++) { int type; float gain; fOutput->GetOutputChannelSourceInfoAt(channel, i, &type, &gain); if (type < 0 || type >= MAX_CHANNEL_TYPES) continue; int count = inputChanInfos[type].CountItems(); for (int j = 0; j < count; j++) { chan_info* info = inputChanInfos[type].ItemAt(j); chan_info* newInfo = mixChanInfos[channel].Create(); newInfo->base = info->base; newInfo->sample_offset = info->sample_offset; newInfo->gain = info->gain * gain; } } } memset(fMixBuffer, 0, fMixBufferChannelCount * fMixBufferFrameCount * sizeof(float)); for (int channel = 0; channel < fMixBufferChannelCount; channel++) { PRINT(5, "_MixThread: channel %d has %d sources\n", channel, mixChanInfos[channel].CountItems()); int count = mixChanInfos[channel].CountItems(); for (int i = 0; i < count; i++) { chan_info* info = mixChanInfos[channel].ItemAt(i); PRINT(5, "_MixThread: base %p, sample-offset %2d, gain %.3f\n", info->base, info->sample_offset, info->gain); // This looks slightly ugly, but the current GCC will generate // the fastest code this way. // fMixBufferFrameCount is always > 0. uint32 dstSampleOffset = fMixBufferChannelCount * sizeof(float); uint32 srcSampleOffset = info->sample_offset; register char* dst = (char*)&fMixBuffer[channel]; register char* src = (char*)info->base; register float gain = info->gain; register int j = fMixBufferFrameCount; do { *(float*)dst += *(const float*)src * gain; dst += dstSampleOffset; src += srcSampleOffset; } while (--j); } } // request a buffer BBuffer* buffer; buffer = fBufferGroup->RequestBuffer( fOutput->MediaOutput().format.u.raw_audio.buffer_size, bufferRequestTimeout); if (buffer != NULL) { // copy data from mix buffer into output buffer for (int i = 0; i < fMixBufferChannelCount; i++) { fResampler[i]->Resample( reinterpret_cast<char*>(fMixBuffer) + i * sizeof(float), fMixBufferChannelCount * sizeof(float), fMixBufferFrameCount, reinterpret_cast<char*>(buffer->Data()) + (i * bytes_per_sample( fOutput->MediaOutput().format.u.raw_audio)), bytes_per_frame(fOutput->MediaOutput().format.u.raw_audio), frames_per_buffer( fOutput->MediaOutput().format.u.raw_audio), fOutputGain * fOutput->GetOutputChannelGain(i)); } PRINT(4, "send buffer, inframes %ld, outframes %ld\n", fMixBufferFrameCount, frames_per_buffer(fOutput->MediaOutput().format.u.raw_audio)); // fill in the buffer header media_header* hdr = buffer->Header(); hdr->type = B_MEDIA_RAW_AUDIO; hdr->size_used = fOutput->MediaOutput().format.u.raw_audio.buffer_size; hdr->time_source = fTimeSource->ID(); hdr->start_time = eventTime; // swap byte order if necessary fOutput->AdjustByteOrder(buffer); // send the buffer status_t res = fNode->SendBuffer(buffer, fOutput); if (res != B_OK) { #if DEBUG ERROR("MixerCore: SendBuffer failed for buffer %Ld\n", bufferIndex); #else ERROR("MixerCore: SendBuffer failed\n"); #endif buffer->Recycle(); } } else { #if 
DEBUG ERROR("MixerCore: RequestBuffer failed for buffer %Ld\n", bufferIndex); #else ERROR("MixerCore: RequestBuffer failed\n"); #endif } // make all lists empty for (int i = 0; i < MAX_CHANNEL_TYPES; i++) inputChanInfos[i].MakeEmpty(); for (int i = 0; i < fOutput->GetOutputChannelCount(); i++) mixChanInfos[i].MakeEmpty(); schedule_next_event: // schedule next event framePos += fMixBufferFrameCount; eventTime = timeBase + bigtime_t((1000000LL * framePos) / fMixBufferFrameRate); Unlock(); #if DEBUG bufferIndex++; #endif } }
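_MixThread() aligns its read position using frames_for_duration() and duration_for_frames(), which are not shown in this excerpt. Plausible sketches of these conversions, assuming the usual microsecond bigtime_t units (the real MixerUtils versions may round differently; ceil() needs <math.h>):

// Hypothetical sketches of the time/frame conversion helpers used above.
int64
frames_for_duration(double framerate, bigtime_t duration)
{
	// round up so the returned frame count covers the requested duration
	return (int64)ceil(framerate * double(duration) / 1000000.0);
}

bigtime_t
duration_for_frames(double framerate, int64 frames)
{
	return (bigtime_t)((1000000.0 * frames) / framerate);
}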
void MixerInput::BufferReceived(BBuffer* buffer) { void* data; size_t size; bigtime_t start; bigtime_t buffer_duration; if (!fMixBuffer) { ERROR("MixerInput::BufferReceived: dropped incoming buffer as we " "don't have a mix buffer\n"); return; } data = buffer->Data(); size = buffer->SizeUsed(); start = buffer->Header()->start_time; buffer_duration = duration_for_frames(fInput.format.u.raw_audio.frame_rate, size / bytes_per_frame(fInput.format.u.raw_audio)); if (start < 0) { ERROR("MixerInput::BufferReceived: buffer with negative start time of " "%Ld dropped\n", start); return; } // swap the byte order of this buffer, if necessary if (fInputByteSwap) fInputByteSwap->Swap(data, size); int offset = frames_for_duration(fMixBufferFrameRate, start) % fMixBufferFrameCount; PRINT(4, "MixerInput::BufferReceived: buffer start %10Ld, offset %6d\n", start, offset); int in_frames = size / bytes_per_frame(fInput.format.u.raw_audio); double frames = ((double)in_frames * fMixBufferFrameRate) / fInput.format.u.raw_audio.frame_rate; int out_frames = int(frames); fFractionalFrames += frames - double(out_frames); if (fFractionalFrames >= 1.0) { fFractionalFrames -= 1.0; out_frames++; } // if fLastDataFrameWritten != -1, then we have a valid last position // and can do glitch compensation if (fLastDataFrameWritten >= 0) { int expected_frame = (fLastDataFrameWritten + 1) % fMixBufferFrameCount; if (offset != expected_frame) { // due to rounding and other errors, offset might be off by +/- 1 // this is not really a bad glitch, we just adjust the position if (offset == fLastDataFrameWritten) { // printf("MixerInput::BufferReceived: -1 frame GLITCH! last " // "frame was %ld, expected frame was %d, new frame is %d\n", // fLastDataFrameWritten, expected_frame, offset); offset = expected_frame; } else if (offset == ((fLastDataFrameWritten + 2) % fMixBufferFrameCount)) { // printf("MixerInput::BufferReceived: +1 frame GLITCH! last " // "frame was %ld, expected frame was %d, new frame is %d\n", // fLastDataFrameWritten, expected_frame, offset); offset = expected_frame; } else { printf("MixerInput::BufferReceived: GLITCH! 
last frame was " "%4ld, expected frame was %4d, new frame is %4d\n", fLastDataFrameWritten, expected_frame, offset); if (start > fLastDataAvailableTime) { if ((start - fLastDataAvailableTime) < (buffer_duration / 10)) { // buffer is less than 10% of buffer duration too late printf("short glitch, buffer too late, time delta " "%Ld\n", start - fLastDataAvailableTime); offset = expected_frame; out_frames++; } else { // buffer more than 10% of buffer duration too late // TODO: zerofill buffer printf("MAJOR glitch, buffer too late, time delta " "%Ld\n", start - fLastDataAvailableTime); } } else { // start <= fLastDataAvailableTime // the new buffer is too early if ((fLastDataAvailableTime - start) < (buffer_duration / 10)) { // buffer is less than 10% of buffer duration too early printf("short glitch, buffer too early, time delta " "%Ld\n", fLastDataAvailableTime - start); offset = expected_frame; out_frames--; if (out_frames < 1) out_frames = 1; } else { // buffer more than 10% of buffer duration too early // TODO: zerofill buffer printf("MAJOR glitch, buffer too early, time delta " "%Ld\n", fLastDataAvailableTime - start); } } } } } // printf("data arrived for %10Ld to %10Ld, storing at frames %ld to %ld\n", // start, // start + duration_for_frames(fInput.format.u.raw_audio.frame_rate, // frames_per_buffer(fInput.format.u.raw_audio)), offset, // offset + out_frames); if (offset + out_frames > fMixBufferFrameCount) { int out_frames1 = fMixBufferFrameCount - offset; int out_frames2 = out_frames - out_frames1; int in_frames1 = (out_frames1 * in_frames) / out_frames; int in_frames2 = in_frames - in_frames1; // printf("at %10Ld, data arrived for %10Ld to %10Ld, storing at " // "frames %ld to %ld and %ld to %ld\n", fCore->fTimeSource->Now(), // start, // start + duration_for_frames(fInput.format.u.raw_audio.frame_rate, // frames_per_buffer(fInput.format.u.raw_audio)), offset, // offset + out_frames1 - 1, 0, out_frames2 - 1); PRINT(3, "at %10Ld, data arrived for %10Ld to %10Ld, storing at " "frames %ld to %ld and %ld to %ld\n", fCore->fTimeSource->Now(), start, start + duration_for_frames(fInput.format.u.raw_audio.frame_rate, frames_per_buffer(fInput.format.u.raw_audio)), offset, offset + out_frames1 - 1, 0, out_frames2 - 1); PRINT(5, " in_frames %5d, out_frames %5d, in_frames1 %5d, " "out_frames1 %5d, in_frames2 %5d, out_frames2 %5d\n", in_frames, out_frames, in_frames1, out_frames1, in_frames2, out_frames2); fLastDataFrameWritten = out_frames2 - 1; // convert offset from frames into bytes offset *= sizeof(float) * fInputChannelCount; for (int i = 0; i < fInputChannelCount; i++) { fResampler[i]->Resample( reinterpret_cast<char*>(data) + i * bytes_per_sample(fInput.format.u.raw_audio), bytes_per_frame(fInput.format.u.raw_audio), in_frames1, reinterpret_cast<char*>(fInputChannelInfo[i].buffer_base) + offset, fInputChannelCount * sizeof(float), out_frames1, fInputChannelInfo[i].gain); fResampler[i]->Resample( reinterpret_cast<char*>(data) + i * bytes_per_sample(fInput.format.u.raw_audio) + in_frames1 * bytes_per_frame(fInput.format.u.raw_audio), bytes_per_frame(fInput.format.u.raw_audio), in_frames2, reinterpret_cast<char*>(fInputChannelInfo[i].buffer_base), fInputChannelCount * sizeof(float), out_frames2, fInputChannelInfo[i].gain); } } else { // printf("at %10Ld, data arrived for %10Ld to %10Ld, storing at " // "frames %ld to %ld\n", fCore->fTimeSource->Now(), start, // start + duration_for_frames(fInput.format.u.raw_audio.frame_rate, // frames_per_buffer(fInput.format.u.raw_audio)), offset, // 
offset + out_frames - 1); PRINT(3, "at %10Ld, data arrived for %10Ld to %10Ld, storing at " "frames %ld to %ld\n", fCore->fTimeSource->Now(), start, start + duration_for_frames(fInput.format.u.raw_audio.frame_rate, frames_per_buffer(fInput.format.u.raw_audio)), offset, offset + out_frames - 1); PRINT(5, " in_frames %5d, out_frames %5d\n", in_frames, out_frames); fLastDataFrameWritten = offset + out_frames - 1; // convert offset from frames into bytes offset *= sizeof(float) * fInputChannelCount; for (int i = 0; i < fInputChannelCount; i++) { fResampler[i]->Resample( reinterpret_cast<char*>(data) + i * bytes_per_sample(fInput.format.u.raw_audio), bytes_per_frame(fInput.format.u.raw_audio), in_frames, reinterpret_cast<char*>(fInputChannelInfo[i].buffer_base) + offset, fInputChannelCount * sizeof(float), out_frames, fInputChannelInfo[i].gain); } } fLastDataAvailableTime = start + buffer_duration; }
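The fFractionalFrames bookkeeping in BufferReceived() is easiest to see with concrete numbers; a worked example with hypothetical rates:

// Example (hypothetical rates): 1024-frame input buffers at 44100 Hz feeding
// a 48000 Hz mix buffer map to 1024 * 48000 / 44100 = 1114.557... frames.
// out_frames is 1114 and the 0.557... accumulates in fFractionalFrames, so
// roughly every second buffer carries one extra output frame, keeping the
// long-run rate exact.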
int S9xMovieOpen (const char* filename, bool8 read_only)
{
	FILE* fd;
	STREAM stream;
	int result;
	int fn;

	if(!(fd=fopen(filename, read_only ? "rb" : "rb+")))
		return FILE_NOT_FOUND;

	// stop current movie before opening
	change_state(MOVIE_STATE_NONE);

	// read header
	if((result=read_movie_header(fd, &Movie))!=SUCCESS)
	{
		fclose(fd);
		return result;
	}

	fn=dup(fileno(fd));
	fclose(fd);

	// apparently this lseek is necessary
	lseek(fn, Movie.SaveStateOffset, SEEK_SET);
	if(!(stream=REOPEN_STREAM(fn, "rb")))
		return FILE_NOT_FOUND;

	if(Movie.Opts & MOVIE_OPT_FROM_RESET)
	{
		S9xReset();
		// save only SRAM for a from-reset snapshot
		result=(READ_STREAM(SRAM, 0x20000, stream) == 0x20000) ? SUCCESS : WRONG_FORMAT;
	}
	else
	{
		result=S9xUnfreezeFromStream(stream);
	}
	CLOSE_STREAM(stream);

	if(result!=SUCCESS)
	{
		return result;
	}

	if(!(fd=fopen(filename, read_only ? "rb" : "rb+")))
		return FILE_NOT_FOUND;

	if(fseek(fd, Movie.ControllerDataOffset, SEEK_SET))
		return WRONG_FORMAT;

	// read controller data
	Movie.File=fd;
	Movie.BytesPerFrame=bytes_per_frame();
	Movie.InputBufferPtr=Movie.InputBuffer;
	uint32 to_read=Movie.BytesPerFrame * (Movie.MaxFrame+1);
	reserve_buffer_space(to_read);
	fread(Movie.InputBufferPtr, 1, to_read, fd);

	// read "baseline" controller data
	read_frame_controller_data();

	strncpy(Movie.Filename, filename, _MAX_PATH);
	Movie.Filename[_MAX_PATH-1]='\0';
	Movie.CurrentFrame=0;
	Movie.ReadOnly=read_only;
	change_state(MOVIE_STATE_PLAY);

	S9xMessage(S9X_INFO, S9X_MOVIE_INFO, MOVIE_INFO_REPLAY);
	return SUCCESS;
}
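Both versions of S9xMovieOpen() set Movie.BytesPerFrame from a file-local bytes_per_frame(). A plausible sketch, assuming the movie record exposes a ControllersMask bitfield (an assumed name) and that each enabled controller contributes one 16-bit word of pad data per frame; the real routine may account for peripherals differently:

// Hypothetical sketch of the per-frame controller-data size.
static int bytes_per_frame ()
{
	int bytes = 0;
	for (int i = 0; i < 5; i++)
	{
		if (Movie.ControllersMask & (1 << i))
			bytes += 2;   // one 16-bit joypad word per enabled controller
	}
	return bytes;
}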
static int
directsound_stream_init(cubeb * context, cubeb_stream ** stream, char const * stream_name,
                        cubeb_stream_params stream_params, unsigned int latency,
                        cubeb_data_callback data_callback,
                        cubeb_state_callback state_callback,
                        void * user_ptr)
{
  struct cubeb_list_node * node;

  assert(context);
  *stream = NULL;

  /* create primary buffer */

  DSBUFFERDESC bd;
  bd.dwSize = sizeof(DSBUFFERDESC);
  bd.dwFlags = DSBCAPS_PRIMARYBUFFER;
  bd.dwBufferBytes = 0;
  bd.dwReserved = 0;
  bd.lpwfxFormat = NULL;
  bd.guid3DAlgorithm = DS3DALG_DEFAULT;

  LPDIRECTSOUNDBUFFER primary;
  if (FAILED(context->dsound->CreateSoundBuffer(&bd, &primary, NULL))) {
    return 1;
  }

  WAVEFORMATEXTENSIBLE wfx;
  wfx.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
  wfx.Format.nChannels = stream_params.channels;
  wfx.Format.nSamplesPerSec = stream_params.rate;
  wfx.Format.cbSize = sizeof(wfx) - sizeof(wfx.Format);

  /* XXX fix channel mappings */
  wfx.dwChannelMask = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT;

  switch (stream_params.format) {
  case CUBEB_SAMPLE_S16LE:
    wfx.Format.wBitsPerSample = 16;
    wfx.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
    break;
  case CUBEB_SAMPLE_FLOAT32LE:
    wfx.Format.wBitsPerSample = 32;
    wfx.SubFormat = KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;
    break;
  default:
    return CUBEB_ERROR_INVALID_FORMAT;
  }

  wfx.Format.nBlockAlign = (wfx.Format.wBitsPerSample * wfx.Format.nChannels) / 8;
  wfx.Format.nAvgBytesPerSec = wfx.Format.nSamplesPerSec * wfx.Format.nBlockAlign;
  wfx.Samples.wValidBitsPerSample = wfx.Format.wBitsPerSample;

  if (FAILED(primary->SetFormat((LPWAVEFORMATEX) &wfx))) {
    /* XXX free primary */
    return CUBEB_ERROR;
  }

  primary->Release();

  cubeb_stream * stm = (cubeb_stream *) calloc(1, sizeof(*stm));
  assert(stm);

  stm->context = context;

  stm->params = stream_params;
  stm->data_callback = data_callback;
  stm->state_callback = state_callback;
  stm->user_ptr = user_ptr;

  InitializeCriticalSection(&stm->lock);

  /* create secondary buffer */

  bd.dwFlags = DSBCAPS_GETCURRENTPOSITION2 | DSBCAPS_GLOBALFOCUS | DSBCAPS_CTRLVOLUME |
               DSBCAPS_CTRLPOSITIONNOTIFY;
  bd.dwBufferBytes = (DWORD) (wfx.Format.nSamplesPerSec / 1000.0 * latency *
                              bytes_per_frame(stream_params));
  if (bd.dwBufferBytes % bytes_per_frame(stream_params) != 0) {
    bd.dwBufferBytes += bytes_per_frame(stream_params) -
                        (bd.dwBufferBytes % bytes_per_frame(stream_params));
  }
  bd.lpwfxFormat = (LPWAVEFORMATEX) &wfx;
  if (FAILED(context->dsound->CreateSoundBuffer(&bd, &stm->buffer, NULL))) {
    return CUBEB_ERROR;
  }

  stm->buffer_size = bd.dwBufferBytes;

  LPDIRECTSOUNDNOTIFY notify;
  if (stm->buffer->QueryInterface(IID_IDirectSoundNotify, (LPVOID *) &notify) != DS_OK) {
    /* XXX free resources */
    return CUBEB_ERROR;
  }

  DSBPOSITIONNOTIFY note[3];
  for (int i = 0; i < 3; ++i) {
    note[i].dwOffset = (stm->buffer_size / 4) * i;
    note[i].hEventNotify = context->streams_event;
  }
  if (notify->SetNotificationPositions(3, note) != DS_OK) {
    /* XXX free resources */
    return CUBEB_ERROR;
  }
  notify->Release();

  refill_stream(stm, 1);
  /* XXX remove this, just a test that double refill does not overwrite existing data */
  refill_stream(stm, 0);
  uint64_t pos;
  cubeb_stream_get_position(stm, &pos);

  stm->node = (struct cubeb_list_node *) calloc(1, sizeof(*node));
  stm->node->stream = stm;

  EnterCriticalSection(&context->lock);
  if (!context->streams) {
    context->streams = stm->node;
  } else {
    node = context->streams;
    while (node->next) {
      node = node->next;
    }
    node->next = stm->node;
    stm->node->prev = node;
  }
  LeaveCriticalSection(&context->lock);

  SetEvent(context->streams_event);

  *stream = stm;

  return CUBEB_OK;
}