void fs_ml_frame_update_end(int frame) {
    //printf("%d\n", frame);

    // in timed mode only (non-vsync), the video renderer is waiting for
    // a new frame signal
    fs_mutex_lock(g_frame_available_mutex);
    g_available_frame = frame;
    fs_condition_signal(g_frame_available_cond);
    fs_mutex_unlock(g_frame_available_mutex);

    if (g_fs_ml_video_sync) {
        fs_mutex_lock(g_start_new_frame_mutex);
        while (!g_start_new_frame) {
            fs_condition_wait(g_start_new_frame_cond, g_start_new_frame_mutex);
        }
        g_start_new_frame = 0;
        fs_mutex_unlock(g_start_new_frame_mutex);
    } else if (g_fs_ml_vblank_sync) {
        // emulation running independently of the video renderer
    } else if (g_fs_ml_benchmarking) {
        // run as fast as possible
    } else {
        // video renderer is waiting for a new frame - signal that a new
        // frame is ready
        //fs_condition_signal(g_video_cond);
    }
}

static void fs_eventlog_new_event(int64_t *event_time, int *event,
                                  uint8_t event_type) {
    FS_INIT(module);
    if (!g_log_events) {
        return;
    }
    int64_t t;
    if (event_time == NULL) {
        t = fs_get_monotonic_time();
    } else {
        if (*event_time == 0) {
            *event_time = fs_get_monotonic_time();
        }
        t = *event_time;
    }
    // packed 13-byte record: type (1 byte) + event id (4 bytes) +
    // timestamp (8 bytes); fields are written at byte offsets, so the
    // stores may be unaligned
    void *data = malloc(13);
    *((uint8_t *) data) = event_type;
    *((int64_t *) ((uint8_t *) data + 5)) = t;
    fs_mutex_lock(g_mutex);
    int e = ++g_event;
    *((int32_t *) ((uint8_t *) data + 1)) = e;
    g_event_list = g_list_append(g_event_list, data);
    g_event_count++;
    if (g_event_count > 1000000) {
        flush_events();
    }
    fs_mutex_unlock(g_mutex);
    if (event) {
        *event = e;
    }
}

static void fs_eventlog_update_event(int64_t event_time, int event,
                                     int64_t t1, int64_t t2) {
    FS_INIT(module);
    if (!g_log_events) {
        return;
    }
    if (t1 == 0) {
        t1 = fs_get_monotonic_time();
    }
    if (t2 == 0) {
        t2 = t1;
    }
    t1 = t1 - event_time;
    t2 = t2 - event_time;
    // packed 13-byte record: type (1 byte) + event id (4 bytes) +
    // two 32-bit time deltas (4 + 4 bytes)
    void *data = malloc(13);
    *((uint8_t *) data) = 255; // event update
    *((int32_t *) ((uint8_t *) data + 1)) = event;
    *((uint32_t *) ((uint8_t *) data + 5)) = t1;
    *((uint32_t *) ((uint8_t *) data + 9)) = t2;
    fs_mutex_lock(g_mutex);
    g_event_list = g_list_append(g_event_list, data);
    g_event_count++;
    fs_mutex_unlock(g_mutex);
}

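/*
 * The two functions above pack each log entry into a 13-byte record:
 * a 1-byte type, a 4-byte event id, and either an 8-byte timestamp
 * (new event) or two 4-byte deltas (update, type 255). The sketch below
 * shows how such a record could be decoded; it is illustrative only -
 * the struct and function names are not part of the codebase, and the
 * layout is assumed from the writes above.
 */
#include <stdint.h>
#include <string.h>

typedef struct {
    uint8_t type;      /* 255 means "event update" */
    int32_t event;     /* event id */
    int64_t time;      /* valid when type != 255 */
    uint32_t t1, t2;   /* valid when type == 255 */
} decoded_event;

/* Decode one packed 13-byte record; memcpy avoids unaligned reads. */
static void decode_event_record(const uint8_t *data, decoded_event *out) {
    memset(out, 0, sizeof(*out));
    out->type = data[0];
    memcpy(&out->event, data + 1, 4);
    if (out->type == 255) {
        memcpy(&out->t1, data + 5, 4);
        memcpy(&out->t2, data + 9, 4);
    } else {
        memcpy(&out->time, data + 5, 8);
    }
}
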
static void unqueue_old_buffers(int stream) {
    audio_stream *s = g_streams[stream];
    ALint old_buffers = 0;
    fs_mutex_lock(s->mutex);
    // locking here because unqueue_old_buffers can be called from
    // both the video thread and the emulation thread (consider changing this,
    // perhaps even have a separate thread for periodically unqueuing).
    alGetSourcei(s->source, AL_BUFFERS_PROCESSED, &old_buffers);
    check_al_error("alGetSourcei (AL_BUFFERS_PROCESSED)");
    if (old_buffers > 0) {
        ALuint buffers[MAX_BUFFERS];
        old_buffers = MIN(old_buffers, MAX_BUFFERS);
        alSourceUnqueueBuffers(s->source, old_buffers, buffers);
        if (check_al_error("alSourceUnqueueBuffers") != AL_NO_ERROR) {
            fs_log("while trying to unqueue %d buffers\n", old_buffers);
        }
        for (int i = 0; i < old_buffers; i++) {
            g_queue_push_tail(s->queue, FS_UINT_TO_POINTER(buffers[i]));
        }
        s->buffers_queued -= old_buffers;
    }
    fs_mutex_unlock(s->mutex);
}

static int fs_emu_get_netplay_input_event() {
    fs_mutex_lock(g_input_event_mutex);
    int input_event = FS_POINTER_TO_INT(g_queue_pop_tail(
            g_input_event_queue));
    fs_mutex_unlock(g_input_event_mutex);
    return input_event;
}

static void fs_emu_queue_netplay_input_event(int input_event) {
    if (input_event == 0) {
        fs_log("WARNING: tried to queue input event 0\n");
        return;
    }
    fs_mutex_lock(g_input_event_mutex);
    g_queue_push_head(g_input_event_queue, FS_INT_TO_POINTER(input_event));
    fs_mutex_unlock(g_input_event_mutex);
}

static void post_video_event(int event) {
    if (fse_drivers()) {
        // printf("FSE_DRIVERS: ignoring post_video_event\n");
    } else {
        fs_mutex_lock(g_video_event_mutex);
        g_queue_push_head(g_video_event_queue, FS_INT_TO_POINTER(event));
        fs_mutex_unlock(g_video_event_mutex);
    }
}

static void post_video_event(int event) {
#ifdef FS_EMU_DRIVERS
    // printf("FS_EMU_DRIVERS: ignoring post_video_event\n");
#else
    fs_mutex_lock(g_video_event_mutex);
    g_queue_push_head(g_video_event_queue, FS_INT_TO_POINTER(event));
    fs_mutex_unlock(g_video_event_mutex);
#endif
}

static int check_buffer(int stream, int buffer) {
    unqueue_old_buffers(stream);
    audio_stream *s = g_streams[stream];
    // not extremely efficient
    fs_mutex_lock(s->mutex);
    GList *link = g_queue_peek_head_link(s->queue);
    while (link) {
        if ((unsigned int) buffer == FS_POINTER_TO_UINT(link->data)) {
            fs_mutex_unlock(s->mutex);
            return 1;
        }
        link = link->next;
    }
    fs_mutex_unlock(s->mutex);
    return 0;
}

void fs_log_string(const char *str) {
    if (!log.initialized) {
        initialize();
    }
    fs_mutex_lock(log.mutex);
    if (log.use_stdout) {
        printf("%s", str);
        fflush(stdout);
    }
    if (log.file) {
        fprintf(log.file, "%s", str);
        if (log.flush) {
            fflush(log.file);
        }
    }
    fs_mutex_unlock(log.mutex);
}

void fs_ml_video_screenshot(const char *path) {
    fs_mutex_lock(g_fs_ml_video_screenshot_mutex);
    if (g_fs_ml_video_screenshot_path) {
        g_free(g_fs_ml_video_screenshot_path);
    }
    g_fs_ml_video_screenshot_path = g_strdup(path);
#if 0
    if (g_fs_ml_video_screenshots_dir) {
        g_free(g_fs_ml_video_screenshots_dir);
    }
    if (g_fs_ml_video_screenshots_prefix) {
        g_free(g_fs_ml_video_screenshots_prefix);
    }
    g_fs_ml_video_screenshots_dir = g_strdup(screenshots_dir);
    g_fs_ml_video_screenshots_prefix = g_strdup(prefix);
    g_fs_ml_video_screenshot = number;
#endif
    fs_mutex_unlock(g_fs_ml_video_screenshot_mutex);
}

static void process_video_events() {
    fs_mutex_lock(g_video_event_mutex);
    int count = g_queue_get_length(g_video_event_queue);
    for (int i = 0; i < count; i++) {
        int event = FS_POINTER_TO_INT(g_queue_pop_tail(g_video_event_queue));
        if (event == FS_ML_VIDEO_EVENT_GRAB_INPUT) {
            fs_ml_grab_input(1, 1);
        } else if (event == FS_ML_VIDEO_EVENT_UNGRAB_INPUT) {
            fs_ml_grab_input(0, 1);
        } else if (event == FS_ML_VIDEO_EVENT_SHOW_CURSOR) {
            fs_ml_show_cursor(1, 1);
        } else if (event == FS_ML_VIDEO_EVENT_HIDE_CURSOR) {
            fs_ml_show_cursor(0, 1);
        }
    }
    fs_mutex_unlock(g_video_event_mutex);
}

void fs_config_set_log_file(const char *path) {
    fs_log("switch to log file %s\n", path);
    fs_mutex_lock(log.mutex);
    if (log.file) {
        fclose(log.file);
    }
    log.file = g_fopen(path, "w");
    if (log.file) {
        printf("LOG: %s\n", path);
        if (log.initial_path) {
            // copy what was already logged to the initial log file
            FILE *f = g_fopen(log.initial_path, "r");
            if (f) {
                char *buffer = (char *) g_malloc(1024);
                int read = fread(buffer, 1, 1024, f);
                while (read > 0) {
                    fwrite(buffer, 1, read, log.file);
                    read = fread(buffer, 1, 1024, f);
                }
                g_free(buffer);
                fclose(f);
            }
        }
    }
    /* All options should have been read, so we can now check log options */
    if (fs_config_get_boolean("flush_log") == 1) {
        log.flush = 1;
    }
    fs_mutex_unlock(log.mutex);
    if (log.flush) {
        fs_log_string("flush_log: will flush log after each log line\n");
    }
}

static void process_video_events(void) {
    fs_mutex_lock(g_video_event_mutex);
    int count = g_queue_get_length(g_video_event_queue);
    for (int i = 0; i < count; i++) {
        int event = FS_POINTER_TO_INT(g_queue_pop_tail(g_video_event_queue));
        if (event == FS_ML_VIDEO_EVENT_GRAB_INPUT) {
            fs_ml_set_input_grab(true);
        } else if (event == FS_ML_VIDEO_EVENT_UNGRAB_INPUT) {
            fs_ml_set_input_grab(false);
        } else if (event == FS_ML_VIDEO_EVENT_SHOW_CURSOR) {
            fs_ml_show_cursor(1, 1);
        } else if (event == FS_ML_VIDEO_EVENT_HIDE_CURSOR) {
            fs_ml_show_cursor(0, 1);
        } else if (event == FS_ML_VIDEO_EVENT_TOGGLE_FULLSCREEN) {
            fs_ml_toggle_fullscreen();
        } else if (event == FS_ML_VIDEO_EVENT_ENABLE_FULLSCREEN) {
            fs_ml_set_fullscreen(true);
        } else if (event == FS_ML_VIDEO_EVENT_DISABLE_FULLSCREEN) {
            fs_ml_set_fullscreen(false);
        }
    }
    fs_mutex_unlock(g_video_event_mutex);
}

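/*
 * Sketch of a call site for this queue: the post_video_event() variants
 * elsewhere in this section push an event id from any thread, and
 * process_video_events() above drains the queue on the video thread so
 * window and input state is only touched there. The helper name below is
 * hypothetical; only post_video_event() and the event constants come from
 * the surrounding code.
 */
static void request_input_grab(int grab) {
    post_video_event(grab ? FS_ML_VIDEO_EVENT_GRAB_INPUT
                          : FS_ML_VIDEO_EVENT_UNGRAB_INPUT);
    /* the actual grab happens later, when the video thread runs
     * process_video_events() */
}
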
static void render_iteration_vsync() {
    if (g_fs_ml_video_sync_low_latency) {
        int current_frame_at_start = g_available_frame;
        //int64_t t1 = fs_ml_monotonic_time();

        int sleep_time = 0;
        int time_left = g_estimated_upload_render_duration;
        int64_t t = fs_emu_monotonic_time();
        if (g_fs_ml_target_frame_time > 0) {
            sleep_time = g_estimated_next_vblank_time - t - time_left;
        }
        if (sleep_time > g_fs_ml_target_frame_time - time_left) {
            sleep_time = 0;
        }
        if (sleep_time > 0) {
            fs_ml_usleep(sleep_time);
        }

        if (g_available_frame > current_frame_at_start) {
            //printf("low latency %d\n", g_available_frame);
        } else {
            //printf("...\n");
        }
    }

    update_frame();
    CHECK_GL_ERROR_MSG("update_frame");
    render_frame();
    CHECK_GL_ERROR_MSG("render_frame");

    //opengl_fence(FENCE_SET);
    //glFlush();
    //opengl_fence(FENCE_WAIT);
    //int64_t upload_render_time = fs_ml_monotonic_time() - t1;
    //printf("urt %lld\n", upload_render_time);

    opengl_swap_synchronous();
    g_measured_vblank_time = fs_ml_monotonic_time();
    g_vblank_count++;

    fs_mutex_lock(g_vblank_mutex);
    g_measured_vblank_times[g_vblank_index] = g_measured_vblank_time;
    g_vblank_index = (g_vblank_index + 1) % VBLANK_COUNT;
    fs_mutex_unlock(g_vblank_mutex);

    // FIXME: adjust g_measured_vblank_time based on historical data
    // (smooth out irregularities) and save the result in
    // g_adjusted_vblank_time
    g_adjusted_vblank_time = g_measured_vblank_time;
    g_sleep_until_vsync_last_time = g_adjusted_vblank_time;
    g_estimated_next_vblank_time = g_adjusted_vblank_time +
            g_fs_ml_target_frame_time;

    // g_start_new_frame_cond is used to signal that a new frame can be
    // generated when the emulation is running in sync - this is not used
    // when only display flipping is synced to vblank
    fs_mutex_lock(g_start_new_frame_mutex);
    g_start_new_frame = 1;
    fs_condition_signal(g_start_new_frame_cond);
    fs_mutex_unlock(g_start_new_frame_mutex);
}

static int queue_buffer(int stream, int16_t* data, int size) {
    if (g_fs_emu_benchmark_mode) {
        /* no audio output while benchmarking */
        return 0;
    }
    if (g_fs_emu_benchmarking) {
        /* no audio output while benchmarking */
        return 0;
    }
    //fs_log("fs_emu_queue_audio_buffer stream %d size %d\n", stream, size);
    audio_stream *s = g_streams[stream];
    ALuint buffer = 0;

    fs_mutex_lock(s->mutex);
    //while (1) {
    buffer = FS_POINTER_TO_UINT(g_queue_pop_head(s->queue));
    if (!buffer) {
        fs_log("no audio buffer available - dropping data\n");
        fs_mutex_unlock(s->mutex);
        return 0;
    }
    s->buffers_queued += 1;
    // create a local copy while we have the lock
    //int buffers_queued = s->buffers_queued;
    fs_mutex_unlock(s->mutex);

#if 0
    /* for debugging, clear one of the stereo channels */
    int16_t *d = data;
    for (int i = 0; i < size / 4; i++) {
        d++;
        *d = 0;
        d++;
    }
#endif

    alBufferData(buffer, AL_FORMAT_STEREO16, data, size, s->frequency);
    check_al_error("alBufferData");
    alSourceQueueBuffers(s->source, 1, &buffer);
    check_al_error("alSourceQueueBuffers");

    ALint state;
    alGetSourcei(s->source, AL_SOURCE_STATE, &state);
    check_al_error("alGetSourcei (AL_SOURCE_STATE)");
    if (state != AL_PLAYING) {
        g_fs_emu_audio_buffer_underrun_time = fs_get_monotonic_time();
        g_fs_emu_audio_buffer_underruns += 1;
        // we have had a buffer underrun - we now wait until we have queued
        // some buffers
        //if (buffers_queued < s->min_buffers) {
        //    // want more buffers
        //}
        //else {
        fs_log("restarting audio stream %d (buffer underrun)\n", stream);
        alSourcePlay(s->source);
        g_fs_emu_audio_stream_playing[stream] = 1;
        check_al_error("alSourcePlay");
        //}
    }

    double want_volume = g_fs_emu_audio_want_volume[stream] * 0.9;
    if (want_volume != s->source_volume_current) {
        s->source_volume_current = want_volume;
        alSourcef(s->source, AL_GAIN, want_volume);
    }

    unqueue_old_buffers(stream);
    return buffer;
}

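/*
 * Taken together, queue_buffer() and check_buffer() suggest a simple
 * producer loop: the emulation hands a filled PCM chunk to queue_buffer(),
 * remembers the returned OpenAL buffer id, and later polls check_buffer()
 * to see whether that buffer has been played and recycled. The caller
 * below is hypothetical - the pcm array, stream number 0, and the helper
 * name are invented for illustration; only queue_buffer() and
 * check_buffer() come from the code above.
 */
static int16_t pcm[2048];       /* interleaved stereo samples */
static int pending_buffer = 0;  /* OpenAL buffer id, 0 = none pending */

static void emulation_audio_tick(void) {
    /* ... fill pcm[] with 1024 stereo frames here ... */

    if (pending_buffer && check_buffer(0, pending_buffer)) {
        /* the previously queued buffer has been played and recycled */
        pending_buffer = 0;
    }
    int buffer = queue_buffer(0, pcm, (int) sizeof(pcm));
    if (buffer) {
        pending_buffer = buffer;
    }
    /* buffer == 0 means the data was dropped (no free buffer, or
     * benchmarking mode) */
}
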
void fs_emu_video_render_mutex_lock() {
    if (!g_video_render_mutex) {
        return;
    }
    fs_mutex_lock(g_video_render_mutex);
}

void fs_emu_acquire_gui_lock() {
    fs_mutex_lock(g_gui_mutex);
    g_gui_mutex_locked = 1;
}

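/*
 * fs_emu_acquire_gui_lock() also sets g_gui_mutex_locked so other code can
 * check that the GUI lock is held. A minimal sketch of the matching release
 * path and an assertion helper, assuming counterparts of this shape exist;
 * the snippet above only shows the acquire side, so these names are
 * assumptions, not confirmed API.
 */
void fs_emu_release_gui_lock() {
    g_gui_mutex_locked = 0;
    fs_mutex_unlock(g_gui_mutex);
}

/* Example use of the flag: code that must run with the GUI lock held can
 * check it cheaply and log a warning otherwise. */
void fs_emu_assert_gui_lock() {
    if (!g_gui_mutex_locked) {
        fs_log("WARNING: GUI lock not held\n");
    }
}
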
void fs_ml_render_iteration() {
    static int first = 1;
    if (first) {
        first = 0;
        initialize_opengl_sync();
    }

    if (g_fs_ml_vblank_sync) {
        render_iteration_vsync();
    } else if (g_fs_ml_benchmarking) {
        update_frame();
        render_frame();
        swap_opengl_buffers();
    } else {
        // when vsync is off, we wait until a new frame is ready and
        // then we display it immediately
        if (fs_ml_is_quitting()) {
            // but when the emulation is quitting, we can't expect any new
            // frames so there's no point waiting for them. Instead, we just
            // sleep a bit to throttle the frame rate for the quit animation
            fs_ml_usleep(10000);
        } else {
            // wait max 33 ms to allow the user interface to work even if
            // the emu hangs
            // int64_t dest_time = fs_get_real_time() + 33 * 1000;
            int64_t end_time = fs_condition_get_wait_end_time(33 * 1000);
            int64_t check_time = 0;
            fs_mutex_lock(g_frame_available_mutex);
            // fs_log("cond wait until %lld\n", end_time);
            while (g_rendered_frame == g_available_frame) {
                fs_condition_wait_until(
                        g_frame_available_cond, g_frame_available_mutex,
                        end_time);
                check_time = fs_condition_get_wait_end_time(0);
                if (check_time >= end_time) {
                    // fs_log("timed out at %lld\n", check_time);
                    break;
                } else {
                    // fs_log("wake-up at %lld (end_time = %lld)\n",
                    //        check_time, end_time);
                }
            }
            fs_mutex_unlock(g_frame_available_mutex);
        }
        update_frame();
        render_frame();
        swap_opengl_buffers();
        //gl_finish();
    }

    if (g_fs_ml_video_screenshot_path) {
        fs_mutex_lock(g_fs_ml_video_screenshot_mutex);
        if (g_fs_ml_video_screenshot_path) {
            save_screenshot_of_opengl_framebuffer(
                    g_fs_ml_video_screenshot_path);
            g_free(g_fs_ml_video_screenshot_path);
            g_fs_ml_video_screenshot_path = NULL;
        }
        fs_mutex_unlock(g_fs_ml_video_screenshot_mutex);
    }

    if (g_fs_ml_video_post_render_function) {
        g_fs_ml_video_post_render_function();
    }
}

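/*
 * The non-vsync branch above is a standard bounded condition wait: sleep on
 * a condition variable until a new frame is available or an absolute
 * deadline passes, re-checking the predicate after every wakeup. For readers
 * unfamiliar with the fs_condition_* wrappers, the same pattern in plain
 * pthreads looks roughly like this; the mutex, condition, and frame counters
 * here are placeholders, not the actual globals.
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

static pthread_mutex_t frame_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t frame_cond = PTHREAD_COND_INITIALIZER;
static int available_frame;
static int rendered_frame;

/* Wait at most timeout_ms for a new frame; returns false on timeout so the
 * caller can render the old frame anyway (as the code above does). */
static bool wait_for_new_frame(int timeout_ms) {
    struct timespec deadline;
    clock_gettime(CLOCK_REALTIME, &deadline);
    deadline.tv_sec += timeout_ms / 1000;
    deadline.tv_nsec += (long) (timeout_ms % 1000) * 1000000L;
    if (deadline.tv_nsec >= 1000000000L) {
        deadline.tv_sec += 1;
        deadline.tv_nsec -= 1000000000L;
    }
    bool got_frame = true;
    pthread_mutex_lock(&frame_mutex);
    /* re-check the predicate after every wakeup (spurious wakeups happen) */
    while (rendered_frame == available_frame) {
        if (pthread_cond_timedwait(&frame_cond, &frame_mutex,
                                   &deadline) == ETIMEDOUT) {
            got_frame = false;
            break;
        }
    }
    pthread_mutex_unlock(&frame_mutex);
    return got_frame;
}
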
static void post_video_event(int event) {
    fs_mutex_lock(g_video_event_mutex);
    g_queue_push_head(g_video_event_queue, FS_INT_TO_POINTER(event));
    fs_mutex_unlock(g_video_event_mutex);
}

void fs_init_lock() {
    fs_mutex_lock(g_init_mutex);
}