static void fs_eventlog_new_event(int64_t *event_time, int *event, uint8_t event_type) { FS_INIT(module); if (!g_log_events) { return; } int64_t t; if (event_time == NULL) { t = fs_get_monotonic_time(); } else { if (*event_time == 0) { *event_time = fs_get_monotonic_time(); } t = *event_time; } void *data = malloc(13); *((uint8_t *) data) = event_type; *((int64_t *) data + 5) = t; fs_mutex_lock(g_mutex); int e = ++g_event; *((int32_t *) data + 1) = e; g_event_list = g_list_append(g_event_list, data); g_event_count++; if (g_event_count > 1000000) { flush_events(); } fs_mutex_unlock(g_mutex); if (event) { *event = e; } }
static void fs_eventlog_update_event(int64_t event_time, int event, int64_t t1, int64_t t2) { FS_INIT(module); if (!g_log_events) { return; } if (t1 == 0) { t1 = fs_get_monotonic_time(); } if (t2 == 0) { t2 = t1; } t1 = t1 - event_time; t2 = t2 - event_time; void *data = malloc(13); *((uint8_t *) data) = 255; // event update *((int32_t *) data + 1) = event; *((uint32_t *) data + 5) = t1; *((uint32_t *) data + 9) = t2; fs_mutex_lock(g_mutex); g_event_list = g_list_append(g_event_list, data); g_event_count++; fs_mutex_unlock(g_mutex); }
/* Returns the absolute deadline for a condition-variable wait that
 * starts now and should last for the given period (same time unit as
 * the underlying clock). Under USE_GLIB the monotonic clock is used;
 * otherwise the current (wall) time. */
int64_t fs_condition_get_wait_end_time(int period)
{
#if defined(USE_GLIB)
    int64_t now = fs_get_monotonic_time();
#else
    int64_t now = fs_get_current_time();
#endif
    return now + period;
}
void fs_ml_render_init() { g_frame_available_cond = fs_condition_create(); g_frame_available_mutex = fs_mutex_create(); g_start_new_frame_cond = fs_condition_create(); g_start_new_frame_mutex = fs_mutex_create(); g_buffer_swap_cond = fs_condition_create(); g_buffer_swap_mutex = fs_mutex_create(); g_epoch = fs_get_monotonic_time(); g_vblank_mutex = fs_mutex_create(); //fs_emu_stat_queue_init(&g_measured_vblank_times, VBLANK_TIMES_COUNT); if (fs_config_get_boolean("low_latency_vsync") == 1) { fs_log("using low latency vsync when full vsync is enabled\n"); g_fs_ml_video_sync_low_latency = 1; } else if (fs_config_get_boolean("low_latency_vsync") == 0) { fs_log("disabling use of low latency vsync\n"); g_fs_ml_video_sync_low_latency = 0; } }
/* Uploads one buffer of 16-bit stereo PCM data to the OpenAL source of
 * the given stream and queues it for playback, restarting the source if
 * it has stopped (buffer underrun).
 *
 * stream: index into g_streams.
 * data:   interleaved stereo samples (AL_FORMAT_STEREO16).
 * size:   size of data in bytes.
 *
 * Returns the (non-zero) OpenAL buffer id that was queued, or 0 if the
 * data was dropped (benchmarking active, or no free buffer available).
 * Note the ALuint buffer id is returned through an int return type. */
static int queue_buffer(int stream, int16_t* data, int size)
{
    if (g_fs_emu_benchmark_mode) {
        /* no audio output while benchmarking */
        return 0;
    }
    if (g_fs_emu_benchmarking) {
        /* no audio output while benchmarking */
        return 0;
    }
    //fs_log("fs_emu_queue_audio_buffer stream %d size %d\n", stream, size);
    audio_stream *s = g_streams[stream];
    ALuint buffer = 0;
    /* Lock only around the free-buffer queue and the queued counter;
     * the OpenAL calls below run without the lock. */
    fs_mutex_lock(s->mutex);
    //while (1) {
    buffer = FS_POINTER_TO_UINT(g_queue_pop_head(s->queue));
    if (!buffer) {
        /* No recycled buffer available: drop this chunk of audio. */
        fs_log("no audio buffer available - dropping data\n");
        fs_mutex_unlock(s->mutex);
        return 0;
    }
    s->buffers_queued += 1;
    // create a local copy while we have the lock
    //int buffers_queued = s->buffers_queued;
    fs_mutex_unlock(s->mutex);
#if 0
    /* for debugging, clear one of the stereo channels */
    int16_t *d = data;
    for (int i = 0; i < size / 4; i++) {
        d++;
        *d = 0;
        d++;
    }
#endif
    /* Copy the PCM data into the OpenAL buffer and queue it. */
    alBufferData(buffer, AL_FORMAT_STEREO16, data, size, s->frequency);
    check_al_error("alBufferData");
    alSourceQueueBuffers(s->source, 1, &buffer);
    check_al_error("alSourceQueueBuffers");
    ALint state;
    alGetSourcei(s->source, AL_SOURCE_STATE, &state);
    check_al_error("alGetSourcei (AL_SOURCE_STATE)");
    if (state != AL_PLAYING) {
        /* Source stopped because it ran out of queued buffers; record
         * the underrun and restart playback immediately. */
        g_fs_emu_audio_buffer_underrun_time = fs_get_monotonic_time();
        g_fs_emu_audio_buffer_underruns += 1;
        // we have had a buffer underrun - we now wait until we have queued
        // some buffers
        //if (buffers_queued < s->min_buffers) {
        //    // want more buffers
        //}
        //else {
        fs_log("restarting audio stream %d (buffer underrun)\n", stream);
        alSourcePlay(s->source);
        g_fs_emu_audio_stream_playing[stream] = 1;
        check_al_error("alSourcePlay");
        //}
    }
    /* Apply volume changes; the 0.9 factor leaves headroom below full
     * gain. NOTE(review): exact floating-point compare against the
     * cached value — relies on the product being bit-identical between
     * calls; confirm source_volume_current is a double. */
    double want_volume = g_fs_emu_audio_want_volume[stream] * 0.9;
    if (want_volume != s->source_volume_current) {
        s->source_volume_current = want_volume;
        alSourcef(s->source, AL_GAIN, want_volume);
    }
    /* Recycle buffers OpenAL has finished playing back into s->queue. */
    unqueue_old_buffers(stream);
    return buffer;
}