/* Timer callback: pops the next VideoPicture from the ring buffer, syncs
 * its display time against the audio clock, shows it, and schedules the
 * next refresh.  userdata is the player-wide VideoState. */
void video_refresh_timer(void *userdata) {
    VideoState *is = (VideoState *)userdata;
    VideoPicture *vp;
    double actual_delay, delay, sync_threshold, ref_clock, diff;

    if(is->video_ctx) {
        if(is->pictq_size == 0) {
            /* Nothing decoded yet: poll again very soon. */
            schedule_refresh(is, 1);
        } else {
            vp = &is->pictq[is->pictq_rindex];

            delay = vp->pts - is->frame_last_pts; /* the pts from last time */
            if(delay <= 0 || delay >= 1.0) {
                /* if incorrect delay, use previous one */
                delay = is->frame_last_delay;
            }
            /* save for next time */
            is->frame_last_delay = delay;
            is->frame_last_pts = vp->pts;

            /* update delay to sync to audio */
            ref_clock = get_audio_clock(is);
            diff = vp->pts - ref_clock;

            /* Skip or repeat the frame. Take delay into account
               FFPlay still doesn't "know if this is the best guess." */
            sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD;
            if(fabs(diff) < AV_NOSYNC_THRESHOLD) {
                if(diff <= -sync_threshold) {
                    delay = 0;          /* video lags audio: show immediately */
                } else if(diff >= sync_threshold) {
                    delay = 2 * delay;  /* video ahead of audio: hold the frame */
                }
            }
            is->frame_timer += delay;

            /* compute the REAL delay relative to the wall clock */
            actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
            if(actual_delay < 0.010) {
                /* Really it should skip the picture instead */
                actual_delay = 0.010;
            }
            schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));

            /* show the picture! */
            video_display(is);

            /* update queue for next picture! */
            if(++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE) {
                is->pictq_rindex = 0;
            }
            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond); /* wake the decoder waiting for a free slot */
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else {
        schedule_refresh(is, 100); /* no video stream yet: retry in 100 ms */
    }
}
void video_refresh_timer(void* data) { VideoState* is = (VideoState*) data; VideoPicture* vp; double actual_delay, delay, sync_threshold, ref_clock, diff; if (is->video_st) { if (is->pictq_size == 0) { schedule_refresh(is, 1); } else { vp = &is->pictq[is->pictq_rindex]; is->video_current_pts = vp->pts; is->video_current_pts_time = av_gettime(); delay = vp->pts - is->frame_last_pts; if (delay <= 0 || delay >= 1.0) { delay = is->frame_last_delay; } /* save for next time */ is->frame_last_delay = delay; is->frame_last_pts = vp->pts; /* update delay to sync to audio if not master source*/ if (is->av_sync_type != AV_SYNC_VIDEO_MASTER) { ref_clock = get_audio_clock(is); diff = vp->pts - ref_clock; sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD; if (fabs(diff) < AV_NOSYNC_THRESHOLD) { if (diff <= -sync_threshold) { delay = 0; } else if (diff >= sync_threshold) { delay = 2 * delay; } } } is->frame_timer += delay; /* computer the REAL delay */ actual_delay = is->frame_timer - (av_gettime() / 1000000.0); if (actual_delay < 0.010) { actual_delay = 0.010; } schedule_refresh(is, int(actual_delay * 1000 + 0.5)); /* show the picture! */ video_display(is); if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE) { is->pictq_rindex = 0; } SDL_LockMutex(is->pictq_mutex); is->pictq_size--; SDL_CondSignal(is->pictq_cond); SDL_UnlockMutex(is->pictq_mutex); } } else { schedule_refresh(is, 100); } }
/* Simplified refresh callback (no A/V sync yet): every queued picture is
 * shown after a fixed 80 ms delay. */
void video_refresh_timer(void *userdata) {
    VideoState *is = (VideoState *) userdata;
    //VideoPicture *vp;

    if (is->video_st) {
        if (is->pictq_size == 0) {
            schedule_refresh(is, 1); /* queue empty: retry almost immediately */
        } else {
            //vp = &is->pictq[is->pictq_rindex];
            /* Now, normally here goes a ton of code about timing, etc. we're
               just going to guess at a delay for now. You can increase and
               decrease this value and hard code the timing - but I don't
               suggest that ;) We'll learn how to do it for real later. */
            schedule_refresh(is, 80);

            /* show the picture! */
            video_display(is);

            /* update queue for next picture! */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE) {
                is->pictq_rindex = 0;
            }
            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond); /* wake a producer waiting for space */
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else {
        schedule_refresh(is, 100); /* no video stream yet: retry in 100 ms */
    }
}
// Refresh the on-screen view once: route to the audio visualizer when an
// audio frame is available and the show mode is not video-only; otherwise
// render the video frame.
void CLS_DlgStreamPusher::screen_display(struct_stream_info *_pstrct_streaminfo)
{
    const bool want_audio_view =
        _pstrct_streaminfo->m_pAudioFrame &&
        _pstrct_streaminfo->m_show_mode != SHOW_MODE_VIDEO;

    if (want_audio_view)
        audio_display(_pstrct_streaminfo);
    else
        video_display(_pstrct_streaminfo);
}
/* this thread gets the stream from the disk or the network */
static int decode_thread(void *arg) { /* DECODE THREAD */
    FFMovie *movie = arg;
    int status;
    AVPacket pkt1, *pkt = &pkt1;

    while(!movie->abort_request && !Global_abort_all) {
        /* read if the queues have room */
        if (movie->audioq.size < MAX_AUDIOQ_SIZE && !movie->dest_showtime) {
            if (av_read_packet(movie->context, pkt) < 0) {
                break; /* EOF or read error: leave the demux loop */
            }
            if (movie->audio_st && pkt->stream_index == movie->audio_st->index) {
                /* the audio queue takes ownership of the packet */
                packet_queue_put(&movie->audioq, pkt);
            } else if (movie->video_st && pkt->stream_index == movie->video_st->index) {
                status = video_read_packet(movie, pkt);
                av_free_packet(pkt);
                if(status < 0) {
                    break; /* decode error: stop the thread */
                }
            } else {
                av_free_packet(pkt); /* packet from a stream we don't play */
            }
        }

        /* A decoded frame is pending: display it once the master clock
           reaches its show time, otherwise sleep briefly and re-check. */
        if(movie->dest_showtime) {
            double now = get_master_clock(movie);
            if(now >= movie->dest_showtime) {
                video_display(movie);
                movie->dest_showtime = 0;
            } else {
//                printf("showtime not ready, waiting... (%.2f,%.2f)\n",
//                       (float)now, (float)movie->dest_showtime);
                SDL_Delay(10);
            }
        }

        if(movie->paused) {
            /* Wait out the pause, then shift the time offset by the paused
               duration so playback resumes where it left off. */
            double endpause, startpause = SDL_GetTicks() / 1000.0;
            while(movie->paused && !movie->abort_request && !Global_abort_all) {
                SDL_Delay(100);
            }
            endpause = SDL_GetTicks() / 1000.0;
            movie->dest_showtime = 0;
            movie->time_offset += endpause - startpause;
        }
    }

    ffmovie_cleanup(movie);
    return 0;
}
/* Called whenever the refresh timer fires (the timer posts FF_REFRESH_EVENT,
 * and handling that event lands here).  Each timer is one-shot — it expires
 * after firing once — but this function arms a new one itself.
 * Takes one VideoPicture out of is->pictq for display, then advances the
 * queue's read index by one step. */
void video_refresh_timer(void *userdata) {

    VideoState *is = (VideoState *)userdata;
    VideoPicture *vp;
    double actual_delay, delay, sync_threshold, ref_clock, diff;

    if (is->video_st) {
        if (is->pictq_size == 0) {
            schedule_refresh(is, 1); /* nothing queued: retry almost at once */
        } else {
            vp = &is->pictq[is->pictq_rindex];

            delay = vp->pts - is->frame_last_pts; /* the pts from last time */
            is->frame_last_pts = vp->pts;

            /* ----------- */
            /* A/V sync: compare this frame's PTS against the audio clock. */
            ref_clock = get_audio_clock(is);
            diff = vp->pts - ref_clock;
            if (diff <= -0.015) {
                delay = 0;          /* video lags audio: show immediately */
            } else if (diff >= 0.015) {
                delay = 2 * delay;  /* video ahead of audio: hold the frame */
            }
            /* ----------- */

            if (delay == 0) {
                count_delay_is_zero++;
                delay = 0.010; /* enforce a 10 ms floor so we never spin */
            }
            count_pict++;
            printf("delay==0 percentage is %lf",(double)count_delay_is_zero/count_pict);

            schedule_refresh(is, (int)(delay * 1000 + 0.5));

            /* show the picture! */
            video_display(is);

            /* update queue for next picture! */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE) {
                is->pictq_rindex = 0;
            }
            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond); /* wake the producer waiting for space */
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else {
        schedule_refresh(is, 100); /* no video stream yet: retry in 100 ms */
    }
}
/* Entry point of the features-detection demo: grabs frames from GStreamer,
 * runs the SimpleFAST detector on a grayscale copy of each frame, draws the
 * detected features over the color image and shows the result in a CImg
 * window until that window is closed.  Always returns 0. */
int FeaturesDetectionApplication::main_loop(program_options::variables_map &options)
{
    printf("FeaturesDetectionApplication::main_loop says hello world !\n");

    //init_gui(options);
    //run_gui();

    // initialization ---
    gst_video_input_p.reset(new GstVideoInput(options));
    features_detector_p.reset(new SimpleFAST(options));

    // video output ---
    rgb8_cimg_t current_image(gst_video_input_p->get_image_dimensions());
    gst_video_input_p->get_new_image(current_image.view); // copy the data

    CImgDisplay video_display(current_image.dimx(), current_image.dimy(),
                              get_application_title().c_str());
    video_display.show();
    video_display.display(current_image);

    // intermediary image --
    gray8_image_t gray_image(current_image.view.dimensions());

    // main loop ---
    do {
        // get new image --
        gst_video_input_p->get_new_image(current_image.view); // copy the data

        // color to gray_image
        copy_and_convert_pixels(current_image.view, boost::gil::view(gray_image));

        // compute features
        const vector<FASTFeature> &features =
            features_detector_p->detect_features((const_view(gray_image)));

        // plot features on output image
        draw_features(features, current_image);
        video_display.display(current_image);

        // add a delay ---
        wait_some_seconds(0.1); // [seconds]
    } while (video_display.is_closed == false);

    return 0;
}
void CONSOLE::printn( const char ch, size_t count ) { for( size_t i = 0; i < count; ++i ) { write_char_to_console( ch, this ); } if( this->visible ) { video_display( this->buffer + this->top_row * this->buffer_width ); video_move_cursor( this->cursor_column, this->cursor_row - this->top_row ); } }
/* printf-style formatted output to this console.  Formatting is delegated
 * to format_string_varg, which emits each produced character through
 * CONSOLE::write_char_to_console with `this` as its context.  Afterwards
 * the on-screen text and hardware cursor are refreshed if this console is
 * the visible one. */
void CONSOLE::printf( const char* format, ... )
{
    va_list ap;
    va_start( ap, format );
    format_string_varg( CONSOLE::write_char_to_console, this, format, ap );
    va_end( ap );

    if( this->visible )
    {
        /* Repaint from the first visible row; cursor row is relative to
           the top of the visible window. */
        video_display( this->buffer + this->top_row * this->buffer_width );
        video_move_cursor( this->cursor_column, this->cursor_row - this->top_row );
    }
}
/* Main SDL event loop: while no event is pending, repaint the current frame
 * roughly every 40 ms; otherwise dispatch quit and picture-allocation
 * events.  Never returns normally — exits the process on quit. */
static void event_loop(VideoState *is) {
    SDL_Event event;
    for (;;) {
        SDL_PumpEvents();
        while (!SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_ALLEVENTS)) {
            av_usleep(40000); /* TODO: use 1/framerate instead of a fixed 40 ms */
            video_display(is);
            SDL_PumpEvents();
        }
        switch (event.type) {
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            SDL_Quit();
            exit(0);
            break;
        case FF_ALLOC_EVENT:
            /* Decoder thread asked the main thread to (re)allocate a picture. */
            alloc_picture(event.user.data1);
            break;
        default:
            break;
        }
    }
}
/* (Re)create the game window and GL ES context for the requested mode
 * (f = fullscreen flag, w x h = size), applying GCW Zero port tweaks.
 * On failure, retries with progressively reduced requirements (no stereo,
 * less multisampling, no stencil).  Returns 1 on success, 0 on failure. */
int video_mode(int f, int w, int h)
{
    //senquack
    /* Enable standard application logging */
    SDL_LogSetPriority(SDL_LOG_CATEGORY_APPLICATION, SDL_LOG_PRIORITY_INFO);
    SDL_LogSetAllPriority(SDL_LOG_PRIORITY_VERBOSE);

    int stereo  = config_get_d(CONFIG_STEREO) ? 1 : 0;
    int stencil = config_get_d(CONFIG_REFLECTION) ? 1 : 0;
    int buffers = config_get_d(CONFIG_MULTISAMPLE) ? 1 : 0;
    int samples = config_get_d(CONFIG_MULTISAMPLE);
    int vsync   = config_get_d(CONFIG_VSYNC) ? 1 : 0;
    int hmd     = config_get_d(CONFIG_HMD) ? 1 : 0;
    int highdpi = config_get_d(CONFIG_HIGHDPI) ? 1 : 0;

    int dpy = config_get_d(CONFIG_DISPLAY);

    int X = SDL_WINDOWPOS_CENTERED_DISPLAY(dpy);
    int Y = SDL_WINDOWPOS_CENTERED_DISPLAY(dpy);

    hmd_free();

    /* Tear down any previous window/context before creating a new one. */
    if (window)
    {
        SDL_GL_DeleteContext(context);
        SDL_DestroyWindow(window);
    }

    //senquack
//    SDL_GL_SetAttribute(SDL_GL_STEREO, stereo);

    //senquack - disabled drawing shadows and reflections on GCW Zero, don't need a stencil buffer:
#ifdef GCWZERO
    SDL_GL_SetAttribute(SDL_GL_STENCIL_SIZE, 0);
#endif

    //senquack - don't need or want these on GCW Zero:
#ifndef GCWZERO
    SDL_GL_SetAttribute(SDL_GL_MULTISAMPLEBUFFERS, buffers);
    SDL_GL_SetAttribute(SDL_GL_MULTISAMPLESAMPLES, samples);
#endif

    /* Require 16-bit double buffer with 16-bit depth buffer. */

    //senquack - GCW Zero port change
    SDL_GL_SetAttribute(SDL_GL_RED_SIZE, 8);
    SDL_GL_SetAttribute(SDL_GL_GREEN_SIZE, 8);
    SDL_GL_SetAttribute(SDL_GL_BLUE_SIZE, 8);
    SDL_GL_SetAttribute(SDL_GL_ALPHA_SIZE, 8);
    SDL_GL_SetAttribute(SDL_GL_BUFFER_SIZE, 32);
//    SDL_GL_SetAttribute(SDL_GL_RED_SIZE, 5);
//    SDL_GL_SetAttribute(SDL_GL_GREEN_SIZE, 6);
//    SDL_GL_SetAttribute(SDL_GL_BLUE_SIZE, 5);
//    SDL_GL_SetAttribute(SDL_GL_ALPHA_SIZE, 0);
//    SDL_GL_SetAttribute(SDL_GL_BUFFER_SIZE, 16);

    SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, 16);
    SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);

    //senquack - request a hardware-accelerated OpenGL ES 1.1 context:
    SDL_GL_SetAttribute(SDL_GL_ACCELERATED_VISUAL, 1);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_ES);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 1);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 1);

    /* Try to set the currently specified mode. */

    log_printf("Creating a window (%dx%d, %s)\n",
               w, h, (f ? "fullscreen" : "windowed"));

    //senquack DEBUG - DO NOT RUN - loads GLES2.0 for some reason
//    SDL_VideoInit(NULL);

    window = SDL_CreateWindow("", X, Y, w, h,
                              SDL_WINDOW_OPENGL |
                              (highdpi ? SDL_WINDOW_ALLOW_HIGHDPI : 0) |
                              (f ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0));

    if (window)
    {
        if ((context = SDL_GL_CreateContext(window)))
        {
            int buf, smp;

            SDL_GL_GetAttribute(SDL_GL_MULTISAMPLEBUFFERS, &buf);
            SDL_GL_GetAttribute(SDL_GL_MULTISAMPLESAMPLES, &smp);

            /*
             * Work around SDL+WGL returning pixel formats below
             * minimum specifications instead of failing, thus
             * bypassing our fallback path. SDL tries to ensure that
             * WGL plays by the rules, but forgets about extended
             * context attributes such as multisample. See SDL
             * Bugzilla #77.
             */

            if (buf < buffers || smp < samples)
            {
                log_printf("GL context does not meet minimum specifications\n");
                SDL_GL_DeleteContext(context);
                context = NULL;
            }
        }
    }

    if (window && context)
    {
        set_window_title(TITLE);
        set_window_icon(ICON);

        /*
         * SDL_GetWindowSize can be unreliable when going fullscreen
         * on OSX (and possibly elsewhere). We should really be
         * waiting for a resize / size change event, but for now we're
         * doing this lazy thing instead.
         */

        if (f)
        {
            SDL_DisplayMode dm;

            if (SDL_GetDesktopDisplayMode(video_display(), &dm) == 0)
            {
                video.window_w = dm.w;
                video.window_h = dm.h;

//                //senquack
                /* Force the GCW Zero native 320x240@60 display mode and
                   dump GL driver info for debugging. */
                dm.format = SDL_PIXELFORMAT_RGBA8888;
//                dm.format = SDL_PIXELFORMAT_RGB565;
                dm.w = 320;
                dm.h = 240;
                dm.refresh_rate = 60;
                dm.driverdata = 0;
                SDL_SetWindowDisplayMode(window, &dm);
                printf("setting new video dm..\n");
                SDL_GetCurrentDisplayMode(0, &dm);
                SDL_Log("Screen w: %d h: %d\n", dm.w, dm.h);
                SDL_Log("Screen bpp: %d\n", SDL_BITSPERPIXEL(dm.format));
                SDL_Log("Screen dm: ");
                SDL_Log("\n\n");
                SDL_Log("Vendor : %s\n", glGetString(GL_VENDOR));
                SDL_Log("Renderer : %s\n", glGetString(GL_RENDERER));
                SDL_Log("Version : %s\n", glGetString(GL_VERSION));
                SDL_Log("Extensions : %s\n", glGetString(GL_EXTENSIONS));
                SDL_Log("\n");
                fflush(NULL);
            }
        }
        else
        {
            SDL_GetWindowSize(window, &video.window_w, &video.window_h);
        }

        if (highdpi)
        {
            SDL_GL_GetDrawableSize(window, &video.device_w, &video.device_h);
        }
        else
        {
            video.device_w = video.window_w;
            video.device_h = video.window_h;
        }

        /* Window-to-framebuffer scale (1.0 unless HighDPI is active). */
        video.device_scale = (float) video.device_h / (float) video.window_h;

        log_printf("Created a window (%u, %dx%d, %s)\n",
                   SDL_GetWindowID(window),
                   video.window_w, video.window_h,
                   (f ? "fullscreen" : "windowed"));

        config_set_d(CONFIG_DISPLAY,    video_display());
        config_set_d(CONFIG_FULLSCREEN, f);
        config_set_d(CONFIG_WIDTH,      video.window_w);
        config_set_d(CONFIG_HEIGHT,     video.window_h);

        SDL_GL_SetSwapInterval(vsync);

        if (!glext_init())
            return 0;

        glViewport(0, 0, video.device_w, video.device_h);
        glClearColor(0.0f, 0.0f, 0.0f, 0.0f);

        glEnable(GL_NORMALIZE);
        glEnable(GL_CULL_FACE);
        glEnable(GL_DEPTH_TEST);
        glEnable(GL_TEXTURE_2D);
        glEnable(GL_LIGHTING);
        glEnable(GL_BLEND);

#if !ENABLE_OPENGLES
        glLightModeli(GL_LIGHT_MODEL_COLOR_CONTROL,
                      GL_SEPARATE_SPECULAR_COLOR);
#endif

        glPixelStorei(GL_PACK_ALIGNMENT, 1);
        glPixelStorei(GL_UNPACK_ALIGNMENT, 1);

        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
        glDepthFunc(GL_LEQUAL);

        /* If GL supports multisample, and SDL got a multisample buffer... */

        if (glext_check("ARB_multisample"))
        {
            SDL_GL_GetAttribute(SDL_GL_MULTISAMPLEBUFFERS, &buffers);
            if (buffers)
                glEnable(GL_MULTISAMPLE);
        }

        /* Set up HMD display if requested. */

        if (hmd)
            hmd_init();

        /* Initialize screen snapshotting. */

        snapshot_init();

        video_show_cursor();

        /* Grab input immediately in HMD mode. */

        if (hmd_stat())
            SDL_SetWindowGrab(window, SDL_TRUE);

        return 1;
    }

    /* If the mode failed, try it without stereo. */

    else if (stereo)
    {
        config_set_d(CONFIG_STEREO, 0);
        return video_mode(f, w, h);
    }

    /* If the mode failed, try decreasing the level of multisampling. */

    else if (buffers)
    {
        config_set_d(CONFIG_MULTISAMPLE, samples / 2);
        return video_mode(f, w, h);
    }

    /* If that mode failed, try it without reflections. */

    else if (stencil)
    {
        config_set_d(CONFIG_REFLECTION, 0);
        return video_mode(f, w, h);
    }

    /* If THAT mode failed, punt. */

    return 0;
}
/* Render the current VideoPicture, letterboxed to the screen while
 * preserving the stream's aspect ratio.  Uses either a YUV overlay or a
 * software-stretched RGB surface depending on USE_SWS_YUV_CONVERT. */
void video_display(VideoState *is)
{
    SDLMOD_Rect rect;
    VideoPicture *vp;
    AVPicture pict;
    float aspect_ratio;
    int w, h, x, y;
    int i;

    vp = &is->pictq[is->pictq_rindex];
#if !USE_SWS_YUV_CONVERT
    if(vp->bmp)
    {
#else
    if(vp->sfc)
    {
#endif
        /* Derive the display aspect ratio from the codec's sample aspect
           ratio; fall back to the raw pixel dimensions when unset. */
        if(is->video_st->codec->sample_aspect_ratio.num == 0)
        {
            aspect_ratio = 0;
        }
        else
        {
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio) *
                is->video_st->codec->width / is->video_st->codec->height;
        }
        if(aspect_ratio <= 0.0)
            aspect_ratio = (float)is->video_st->codec->width /
                (float)is->video_st->codec->height;

        /* Fit to screen height first; `& -3` rounds down to a multiple of 4. */
        h = screen->h;
        w = ((int)(h * aspect_ratio)) & -3;
        if(w > screen->w)
        {
            w = screen->w;
            h = ((int)(w / aspect_ratio)) & -3;
        }
        x = (screen->w - w) / 2;
        y = (screen->h - h) / 2;

        rect.x = x;
        rect.y = y;
        rect.w = w;
        rect.h = h;
#if !USE_SWS_YUV_CONVERT
        SDLMOD_DisplayYUVOverlay(vp->bmp, &rect);
#else
        if (vp->sfc->w > 0 && vp->sfc->h > 0 && rect.w > 0 && rect.h > 0)
        {
            SDLMOD_Rect srcrect;
            srcrect.x = 0;
            srcrect.y = 0;
            srcrect.w = vp->sfc->w;
            srcrect.h = vp->sfc->h;
            SDLMOD_LockSurface(screen);
            //FIXME: SoftStretch doesn't support empty rect (dstrect->h == 0), will crash.
            SDLMOD_SoftStretch(vp->sfc, &srcrect, screen, &rect);
            SDLMOD_UnlockSurface(screen);
        }
#endif
#if USE_GL
        glutPostRedisplay();
#endif
    }
}

/* Request a seek to `pos`; rel < 0 means seeking backward.  Ignored while
 * a previous seek request is still pending. */
void stream_seek(VideoState *is, int64_t pos, int rel)
{
    if(!is->seek_req)
    {
        is->seek_pos = pos;
        is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
        is->seek_req = 1;
    }
}

/* Timer callback: pops the next picture, syncs it against the master clock
 * (unless video is the master), displays it, and schedules the next
 * refresh.  pthread variant of the usual SDL-mutex implementation. */
void video_refresh_timer(void *userdata)
{
    VideoState *is = (VideoState*)userdata;
    VideoPicture *vp;
    double actual_delay, delay, sync_threshold, ref_clock, diff;

    if(is->video_st)
    {
        if(is->pictq_size == 0)
        {
            schedule_refresh(is, 1); /* queue empty: retry almost immediately */
        }
        else
        {
            vp = &is->pictq[is->pictq_rindex];

            /* Record what is on screen now, for the external video clock. */
            is->video_current_pts = vp->pts;
            is->video_current_pts_time = av_gettime();

            delay = vp->pts - is->frame_last_pts;
            if(delay <= 0 || delay >= 1.0)
                delay = is->frame_last_delay; /* bogus PTS step: reuse last delay */
            is->frame_last_delay = delay;
            is->frame_last_pts = vp->pts;

            /* Slave video timing to the master clock. */
            if(is->av_sync_type != AV_SYNC_VIDEO_MASTER)
            {
                ref_clock = get_master_clock(is);
                diff = vp->pts - ref_clock;
                sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD;
                if(fabs(diff) < AV_NOSYNC_THRESHOLD)
                {
                    if(diff <= -sync_threshold)
                    {
                        delay = 0;          /* behind: show immediately */
                    }
                    else if(diff >= sync_threshold)
                    {
                        delay = 2 * delay;  /* ahead: hold the frame longer */
                    }
                }
            }
            is->frame_timer += delay;
            actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
            if(actual_delay < 0.010)
                actual_delay = 0.010; /* 10 ms floor */
            schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));

            video_display(is);

            /* Advance the read index and release one queue slot. */
            if(++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;
            pthread_mutex_lock(is->pictq_mutex);
            is->pictq_size--;
            //printf("video_refresh_timer signal %d\n", is->pictq_size);
            pthread_cond_signal(is->pictq_cond);
            pthread_mutex_unlock(is->pictq_mutex);
        }
    }
    else
        schedule_refresh(is, 100); /* no video stream yet */
}
/* Poll and dispatch pending SDL events plus tilt-sensor input.
 * Returns 0 when the game should quit, nonzero to keep running. */
static int loop(void)
{
    SDL_Event e;
    int d = 1;

    int ax, ay, dx, dy;

    /* Process SDL events. */

    while (d && SDL_PollEvent(&e))
    {
        switch (e.type)
        {
        case SDL_QUIT:
            return 0;

        case SDL_MOUSEMOTION:
            /* Convert to OpenGL coordinates. */

            ax = +e.motion.x;
            ay = -e.motion.y + video.window_h;
            dx = +e.motion.xrel;
            dy = (config_get_d(CONFIG_MOUSE_INVERT) ?
                  +e.motion.yrel : -e.motion.yrel);

            /* Convert to pixels. */

            ax = ROUND(ax * video.device_scale);
            ay = ROUND(ay * video.device_scale);
            dx = ROUND(dx * video.device_scale);
            dy = ROUND(dy * video.device_scale);

            st_point(ax, ay, dx, dy);
            break;

        case SDL_MOUSEBUTTONDOWN:
            d = st_click(e.button.button, 1);
            break;

        case SDL_MOUSEBUTTONUP:
            d = st_click(e.button.button, 0);
            break;

        case SDL_KEYDOWN:
            d = handle_key_dn(&e);
            break;

        case SDL_KEYUP:
            d = handle_key_up(&e);
            break;

        case SDL_WINDOWEVENT:
            switch (e.window.event)
            {
            case SDL_WINDOWEVENT_FOCUS_LOST:
                if (video_get_grab())
                    goto_state(&st_pause);
                break;

            case SDL_WINDOWEVENT_MOVED:
                /* Keep the config in sync with the display we moved to. */
                if (config_get_d(CONFIG_DISPLAY) != video_display())
                    config_set_d(CONFIG_DISPLAY, video_display());
                break;

            case SDL_WINDOWEVENT_RESIZED:
                log_printf("Resize event (%u, %dx%d)\n",
                           e.window.windowID,
                           e.window.data1,
                           e.window.data2);
                break;

            case SDL_WINDOWEVENT_SIZE_CHANGED:
                log_printf("Size change event (%u, %dx%d)\n",
                           e.window.windowID,
                           e.window.data1,
                           e.window.data2);
                break;
            }
            break;

        case SDL_TEXTINPUT:
            text_input_str(e.text.text, 1);
            break;

        case SDL_JOYAXISMOTION:
            st_stick(e.jaxis.axis, JOY_VALUE(e.jaxis.value));
            break;

        case SDL_JOYBUTTONDOWN:
            d = st_buttn(e.jbutton.button, 1);
            break;

        case SDL_JOYBUTTONUP:
            d = st_buttn(e.jbutton.button, 0);
            break;

        case SDL_MOUSEWHEEL:
            st_wheel(e.wheel.x, e.wheel.y);
            break;
        }
    }

    /* Process events via the tilt sensor API. */

    if (tilt_stat())
    {
        int b;
        int s;

        st_angle(tilt_get_x(), tilt_get_z());

        while (tilt_get_button(&b, &s))
        {
            const int X = config_get_d(CONFIG_JOYSTICK_AXIS_X0);
            const int Y = config_get_d(CONFIG_JOYSTICK_AXIS_Y0);
            const int L = config_get_d(CONFIG_JOYSTICK_DPAD_L);
            const int R = config_get_d(CONFIG_JOYSTICK_DPAD_R);
            const int U = config_get_d(CONFIG_JOYSTICK_DPAD_U);
            const int D = config_get_d(CONFIG_JOYSTICK_DPAD_D);

            if (b == L || b == R || b == U || b == D)
            {
                static int pad[4] = { 0, 0, 0, 0 };

                /* Track the state of the D-pad buttons. */

                if      (b == L) pad[0] = s;
                else if (b == R) pad[1] = s;
                else if (b == U) pad[2] = s;
                else if (b == D) pad[3] = s;

                /* Convert D-pad button events into joystick axis motion. */

                if      (pad[0] && !pad[1]) st_stick(X, -1.0f);
                else if (pad[1] && !pad[0]) st_stick(X, +1.0f);
                else                        st_stick(X,  0.0f);

                if      (pad[2] && !pad[3]) st_stick(Y, -1.0f);
                else if (pad[3] && !pad[2]) st_stick(Y, +1.0f);
                else                        st_stick(Y,  0.0f);
            }
            else d = st_buttn(b, s);
        }
    }

    return d;
}
/* Poll and dispatch pending SDL events: mouse, keyboard (with per-platform
 * quit shortcuts and key-to-joystick mapping), window and joystick events.
 * Returns 0 when the game should quit, nonzero to keep running. */
static int loop(void)
{
    SDL_Event e;
    int d = 1;
    int c;

    while (d && SDL_PollEvent(&e))
    {
        if (e.type == SDL_QUIT)
            return 0;

        switch (e.type)
        {
        case SDL_MOUSEMOTION:
            /* Flip y so the origin matches OpenGL's bottom-left convention. */
            st_point(+e.motion.x,
                     -e.motion.y + config_get_d(CONFIG_HEIGHT),
                     +e.motion.xrel,
                     -e.motion.yrel);
            break;

        case SDL_MOUSEBUTTONDOWN:
            d = st_click(e.button.button, 1);
            break;

        case SDL_MOUSEBUTTONUP:
            d = st_click(e.button.button, 0);
            break;

        case SDL_KEYDOWN:
            c = e.key.keysym.sym;
#ifdef __APPLE__
            /* Cmd+Q quits on macOS. */
            if (c == SDLK_q && e.key.keysym.mod & KMOD_GUI)
            {
                d = 0;
                break;
            }
#endif
#ifdef _WIN32
            /* Alt+F4 quits on Windows. */
            if (c == SDLK_F4 && e.key.keysym.mod & KMOD_ALT)
            {
                d = 0;
                break;
            }
#endif
            switch (c)
            {
            case KEY_SCREENSHOT:
                shot();
                break;
            case KEY_FPS:
                config_tgl_d(CONFIG_FPS);
                break;
            case KEY_WIREFRAME:
                toggle_wire();
                break;
            case SDLK_RETURN:
                d = st_buttn(config_get_d(CONFIG_JOYSTICK_BUTTON_A), 1);
                break;
            case SDLK_ESCAPE:
                if (video_get_grab())
                    d = st_buttn(config_get_d(CONFIG_JOYSTICK_BUTTON_START), 1);
                else
                    d = st_buttn(config_get_d(CONFIG_JOYSTICK_BUTTON_B), 1);
                break;

            default:
                /* Map configured movement keys onto virtual joystick axes. */
                if (config_tst_d(CONFIG_KEY_FORWARD, c))
                    st_stick(config_get_d(CONFIG_JOYSTICK_AXIS_Y0), -1.0f);
                else if (config_tst_d(CONFIG_KEY_BACKWARD, c))
                    st_stick(config_get_d(CONFIG_JOYSTICK_AXIS_Y0), +1.0f);
                else if (config_tst_d(CONFIG_KEY_LEFT, c))
                    st_stick(config_get_d(CONFIG_JOYSTICK_AXIS_X0), -1.0f);
                else if (config_tst_d(CONFIG_KEY_RIGHT, c))
                    st_stick(config_get_d(CONFIG_JOYSTICK_AXIS_X0), +1.0f);
                else
                    d = st_keybd(e.key.keysym.sym, 1);
            }
            break;

        case SDL_KEYUP:
            c = e.key.keysym.sym;

            switch (c)
            {
            case SDLK_RETURN:
                d = st_buttn(config_get_d(CONFIG_JOYSTICK_BUTTON_A), 0);
                break;
            case SDLK_ESCAPE:
                if (video_get_grab())
                    d = st_buttn(config_get_d(CONFIG_JOYSTICK_BUTTON_START), 0);
                else
                    d = st_buttn(config_get_d(CONFIG_JOYSTICK_BUTTON_B), 0);
                break;

            default:
                /* Movement key released: recenter the corresponding axis. */
                if (config_tst_d(CONFIG_KEY_FORWARD, c))
                    st_stick(config_get_d(CONFIG_JOYSTICK_AXIS_Y0), 0.0f);
                else if (config_tst_d(CONFIG_KEY_BACKWARD, c))
                    st_stick(config_get_d(CONFIG_JOYSTICK_AXIS_Y0), 0.0f);
                else if (config_tst_d(CONFIG_KEY_LEFT, c))
                    st_stick(config_get_d(CONFIG_JOYSTICK_AXIS_X0), 0.0f);
                else if (config_tst_d(CONFIG_KEY_RIGHT, c))
                    st_stick(config_get_d(CONFIG_JOYSTICK_AXIS_X0), 0.0f);
                else
                    d = st_keybd(e.key.keysym.sym, 0);
            }
            break;

        case SDL_WINDOWEVENT:
            switch (e.window.event)
            {
            case SDL_WINDOWEVENT_FOCUS_LOST:
                if (video_get_grab())
                    goto_pause(&st_over);
                break;

            case SDL_WINDOWEVENT_MOVED:
                /* Keep the config in sync with the display we moved to. */
                if (config_get_d(CONFIG_DISPLAY) != video_display())
                    config_set_d(CONFIG_DISPLAY, video_display());
                break;
            }
            break;

        case SDL_JOYAXISMOTION:
            st_stick(e.jaxis.axis, JOY_VALUE(e.jaxis.value));
            break;

        case SDL_JOYBUTTONDOWN:
            d = st_buttn(e.jbutton.button, 1);
            break;

        case SDL_JOYBUTTONUP:
            d = st_buttn(e.jbutton.button, 0);
            break;
        }
    }
    return d;
}
/* Poll and dispatch pending SDL events (mouse, touch, keyboard, window,
 * joystick).  Touch input is translated into mouse-style pointer events,
 * honoring the device's rotation/mirror orientation flags.
 * Returns 0 when the game should quit, nonzero to keep running. */
static int loop(void)
{
    SDL_Event e;
    int d = 1;
    int c;

    int ax, ay, dx, dy;

    while (d && SDL_PollEvent(&e))
    {
        if (e.type == SDL_QUIT)
            return 0;

        switch (e.type)
        {
        case SDL_MOUSEMOTION:
            /* Convert to OpenGL coordinates. */

            ax = +e.motion.x;
            ay = -e.motion.y + video.window_h;
            dx = +e.motion.xrel;
            dy = -e.motion.yrel;

            /* Convert to pixels. */

            ax = ROUND(ax * video.device_scale);
            ay = ROUND(ay * video.device_scale);
            dx = ROUND(dx * video.device_scale);
            dy = ROUND(dy * video.device_scale);

            st_point(ax, ay, dx, dy);
            break;

        case SDL_MOUSEBUTTONDOWN:
            d = st_click(e.button.button, 1);
            break;

        case SDL_MOUSEBUTTONUP:
            d = st_click(e.button.button, 0);
            break;

        case SDL_FINGERMOTION:
        case SDL_FINGERDOWN:
        case SDL_FINGERUP:
            /* Convert to OpenGL coordinates. */

            if (video.device_orientation & VIDEO_ORIENTATION_ROTATE)
            {
                /* Rotated display: swap the normalized x/y axes. */
                ax = e.tfinger.y * video.window_w;
                ay = e.tfinger.x * video.window_h;
                dx = e.tfinger.dy * video.window_w;
                dy = e.tfinger.dx * video.window_h;

                if (video.device_orientation & VIDEO_ORIENTATION_MIRROR)
                {
                    ax = video.window_w - ax;
                    ay = video.window_h - ay;
                    dx *= -1;
                    dy *= -1;
                }
            }
            else
            {
                ax = e.tfinger.x * video.window_w;
                ay = e.tfinger.y * video.window_h;
                dx = e.tfinger.dx * video.window_w;
                dy = e.tfinger.dy * video.window_h;

                if (video.device_orientation & VIDEO_ORIENTATION_MIRROR)
                {
                    ax = video.window_w - ax;
                    dx *= -1;
                }
                else
                {
                    ay = video.window_h - ay;
                    dy *= -1;
                }
            }

            /* Convert to pixels. */

            ax = ROUND(ax * video.device_scale);
            ay = ROUND(ay * video.device_scale);
            dx = ROUND(dx * video.device_scale);
            dy = ROUND(dy * video.device_scale);

            st_point(ax, ay, dx, dy);

            /* Finger down/up acts as a left mouse button. */
            if (e.type == SDL_FINGERDOWN)
            {
                d = st_click(SDL_BUTTON_LEFT, 1);
            }
            else if (e.type == SDL_FINGERUP)
            {
                d = st_click(SDL_BUTTON_LEFT, 0);
                st_point(0, 0, 0, 0);
            }
            break;

        case SDL_KEYDOWN:
            c = e.key.keysym.sym;
#ifdef __APPLE__
            /* Cmd+Q quits on macOS. */
            if (c == SDLK_q && e.key.keysym.mod & KMOD_GUI)
            {
                d = 0;
                break;
            }
#endif
#ifdef _WIN32
            /* Alt+F4 quits on Windows. */
            if (c == SDLK_F4 && e.key.keysym.mod & KMOD_ALT)
            {
                d = 0;
                break;
            }
#endif
            switch (c)
            {
            case KEY_SCREENSHOT:
                shot();
                break;
            case KEY_FPS:
                config_tgl_d(CONFIG_FPS);
                break;
            case KEY_WIREFRAME:
                toggle_wire();
                break;
            case SDLK_RETURN:
            case SDLK_KP_ENTER:
                d = st_buttn(config_get_d(CONFIG_JOYSTICK_BUTTON_A), 1);
                break;
            case SDLK_ESCAPE:
                if (video_get_grab())
                    d = st_buttn(config_get_d(CONFIG_JOYSTICK_BUTTON_START), 1);
                else
                    d = st_buttn(config_get_d(CONFIG_JOYSTICK_BUTTON_B), 1);
                break;

            default:
                /* Map configured movement keys onto virtual joystick axes. */
                if (config_tst_d(CONFIG_KEY_FORWARD, c))
                    st_stick(config_get_d(CONFIG_JOYSTICK_AXIS_Y0), -1.0f);
                else if (config_tst_d(CONFIG_KEY_BACKWARD, c))
                    st_stick(config_get_d(CONFIG_JOYSTICK_AXIS_Y0), +1.0f);
                else if (config_tst_d(CONFIG_KEY_LEFT, c))
                    st_stick(config_get_d(CONFIG_JOYSTICK_AXIS_X0), -1.0f);
                else if (config_tst_d(CONFIG_KEY_RIGHT, c))
                    st_stick(config_get_d(CONFIG_JOYSTICK_AXIS_X0), +1.0f);
                else
                    d = st_keybd(e.key.keysym.sym, 1);
            }
            break;

        case SDL_KEYUP:
            c = e.key.keysym.sym;

            switch (c)
            {
            case SDLK_RETURN:
            case SDLK_KP_ENTER:
                d = st_buttn(config_get_d(CONFIG_JOYSTICK_BUTTON_A), 0);
                break;
            case SDLK_ESCAPE:
                if (video_get_grab())
                    d = st_buttn(config_get_d(CONFIG_JOYSTICK_BUTTON_START), 0);
                else
                    d = st_buttn(config_get_d(CONFIG_JOYSTICK_BUTTON_B), 0);
                break;

            default:
                /* Movement key released: recenter the corresponding axis. */
                if (config_tst_d(CONFIG_KEY_FORWARD, c))
                    st_stick(config_get_d(CONFIG_JOYSTICK_AXIS_Y0), 0.0f);
                else if (config_tst_d(CONFIG_KEY_BACKWARD, c))
                    st_stick(config_get_d(CONFIG_JOYSTICK_AXIS_Y0), 0.0f);
                else if (config_tst_d(CONFIG_KEY_LEFT, c))
                    st_stick(config_get_d(CONFIG_JOYSTICK_AXIS_X0), 0.0f);
                else if (config_tst_d(CONFIG_KEY_RIGHT, c))
                    st_stick(config_get_d(CONFIG_JOYSTICK_AXIS_X0), 0.0f);
                else
                    d = st_keybd(e.key.keysym.sym, 0);
            }
            break;

        case SDL_WINDOWEVENT:
            switch (e.window.event)
            {
            case SDL_WINDOWEVENT_FOCUS_LOST:
                if (video_get_grab())
                    goto_pause(&st_over);
                break;

            case SDL_WINDOWEVENT_MOVED:
                /* Keep the config in sync with the display we moved to. */
                if (config_get_d(CONFIG_DISPLAY) != video_display())
                    config_set_d(CONFIG_DISPLAY, video_display());
                break;
            }
            break;

        case SDL_JOYAXISMOTION:
            st_stick(e.jaxis.axis, JOY_VALUE(e.jaxis.value));
            break;

        case SDL_JOYBUTTONDOWN:
            d = st_buttn(e.jbutton.button, 1);
            break;

        case SDL_JOYBUTTONUP:
            d = st_buttn(e.jbutton.button, 0);
            break;
        }
    }
    return d;
}
/* Timer callback (debug/verbose build): pops the next picture, logs each
 * step of the A/V sync computation to stderr, displays the frame, and
 * schedules the next refresh. */
void video_refresh_timer(void *userdata) {
    VideoState *is = (VideoState *)userdata;
    VideoPicture *vp;
    double actual_delay, delay, sync_threshold, ref_clock, diff;

    if(is->video_st) {
        if(is->pictq_size == 0) {
            fprintf(stderr, "%s pictq_size is 0, schedule another refresh\n", __FUNCTION__);
            schedule_refresh(is, 1);
        } else {
            vp = &is->pictq[is->pictq_rindex];

            delay = vp->pts - is->frame_last_pts; /* the pts from last time */
            fprintf(stderr, "delay 1: %.8f\n", delay);
            if(delay <= 0 || delay >= 1.0) { //larger than 1 seconds or smaller than 0
                /* if incorrect delay, use previous one */
                delay = is->frame_last_delay;
                fprintf(stderr, "delay 2: %.8f\n", delay);
            }
            /* save for next time */
            is->frame_last_delay = delay;
            is->frame_last_pts = vp->pts;

            /* update delay to sync to audio */
            ref_clock = get_audio_clock(is);
            diff = vp->pts - ref_clock;
            fprintf(stderr, "audio video diff: %.8f\n", diff);

            /* Skip or repeat the frame. Take delay into account
               FFPlay still doesn't "know if this is the best guess." */
            sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD;
            if(fabs(diff) < AV_NOSYNC_THRESHOLD) {
                if(diff <= -sync_threshold) {
                    delay = 0;          /* video lags audio: show immediately */
                } else if(diff >= sync_threshold) {
                    delay = 2 * delay;  /* video ahead: hold the frame longer */
                }
            }
            is->frame_timer += delay;

            /* compute the REAL delay against the wall clock */
            actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
            fprintf(stderr, "actual_delay %.8f\n", actual_delay);
            if(actual_delay < 0.010) { //smaller than 10 ms
                /* Really it should skip the picture instead */
                actual_delay = 0.010;
            }
            // The +0.5 rounds to the nearest millisecond when the value is
            // truncated to int below (same idiom appears in stagefright etc.).
            fprintf(stderr, "%s, delay: %.8f\n", __FUNCTION__, actual_delay*1000+0.5);
            // Video is faster than audio, so we need a delay render.
            // after we show a frame, we figure out when the next frame should be shown
            schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));

            /* show the picture! */
            video_display(is);
            fprintf(stderr, "\n---------------------------------------------------------------------\n");

            /* update queue for next picture! */
            if(++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE) {
                is->pictq_rindex = 0;
            }
            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond); /* wake the decoder waiting for space */
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else {
        fprintf(stderr, "%s, schedule_refresh for another 100 ms\n", __FUNCTION__);
        schedule_refresh(is, 100);
    }
}
/* (Re)create the game window and OpenGL context with the settings from the
 * config system (f = fullscreen flag, w x h = requested size).  On failure,
 * retries with progressively reduced requirements — no stereo, then less
 * multisampling, then no stencil — before giving up.
 * Returns 1 on success, 0 on failure. */
int video_mode(int f, int w, int h)
{
    int stereo  = config_get_d(CONFIG_STEREO) ? 1 : 0;
    int stencil = config_get_d(CONFIG_REFLECTION) ? 1 : 0;
    int buffers = config_get_d(CONFIG_MULTISAMPLE) ? 1 : 0;
    int samples = config_get_d(CONFIG_MULTISAMPLE);
    int vsync   = config_get_d(CONFIG_VSYNC) ? 1 : 0;
    int hmd     = config_get_d(CONFIG_HMD) ? 1 : 0;
    int highdpi = config_get_d(CONFIG_HIGHDPI) ? 1 : 0;

    int dpy = config_get_d(CONFIG_DISPLAY);

    int X = SDL_WINDOWPOS_CENTERED_DISPLAY(dpy);
    int Y = SDL_WINDOWPOS_CENTERED_DISPLAY(dpy);

    hmd_free();

    /* Tear down any previous window/context before creating a new one. */
    if (window)
    {
        SDL_GL_DeleteContext(context);
        SDL_DestroyWindow(window);
    }

#if ENABLE_OPENGLES
    /* Request an OpenGL ES 1.1 context on mobile/embedded builds. */
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_ES);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 1);
    SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 1);
#endif

    SDL_GL_SetAttribute(SDL_GL_STEREO,             stereo);
    SDL_GL_SetAttribute(SDL_GL_STENCIL_SIZE,       stencil);
    SDL_GL_SetAttribute(SDL_GL_MULTISAMPLEBUFFERS, buffers);
    SDL_GL_SetAttribute(SDL_GL_MULTISAMPLESAMPLES, samples);

    /* Require 16-bit double buffer with 16-bit depth buffer. */

    SDL_GL_SetAttribute(SDL_GL_RED_SIZE,     5);
    SDL_GL_SetAttribute(SDL_GL_GREEN_SIZE,   5);
    SDL_GL_SetAttribute(SDL_GL_BLUE_SIZE,    5);
    SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE,  16);
    SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);

    /* Try to set the currently specified mode. */

    log_printf("Creating a window (%dx%d, %s)\n",
               w, h, (f ? "fullscreen" : "windowed"));

    window = SDL_CreateWindow("", X, Y, w, h,
                              SDL_WINDOW_OPENGL |
                              (highdpi ? SDL_WINDOW_ALLOW_HIGHDPI : 0) |
                              (f ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0));

    if (window)
    {
        if ((context = SDL_GL_CreateContext(window)))
        {
            int buf, smp;

            SDL_GL_GetAttribute(SDL_GL_MULTISAMPLEBUFFERS, &buf);
            SDL_GL_GetAttribute(SDL_GL_MULTISAMPLESAMPLES, &smp);

            /*
             * Work around SDL+WGL returning pixel formats below
             * minimum specifications instead of failing, thus
             * bypassing our fallback path. SDL tries to ensure that
             * WGL plays by the rules, but forgets about extended
             * context attributes such as multisample. See SDL
             * Bugzilla #77.
             */

            if (buf < buffers || smp < samples)
            {
                log_printf("GL context does not meet minimum specifications\n");
                SDL_GL_DeleteContext(context);
                context = NULL;
            }
        }
    }

    if (window && context)
    {
        set_window_title(TITLE);
        set_window_icon(ICON);

        /*
         * SDL_GetWindowSize can be unreliable when going fullscreen
         * on OSX (and possibly elsewhere). We should really be
         * waiting for a resize / size change event, but for now we're
         * doing this lazy thing instead.
         */

        if (f)
        {
            SDL_DisplayMode dm;

            if (SDL_GetDesktopDisplayMode(video_display(), &dm) == 0)
            {
                video.window_w = dm.w;
                video.window_h = dm.h;
            }
        }
        else
        {
            SDL_GetWindowSize(window, &video.window_w, &video.window_h);
        }

        if (highdpi)
        {
            SDL_GL_GetDrawableSize(window, &video.device_w, &video.device_h);
        }
        else
        {
            video.device_w = video.window_w;
            video.device_h = video.window_h;
        }

        /* Window-to-framebuffer scale (1.0 unless HighDPI is active). */
        video.device_scale = (float) video.device_h / (float) video.window_h;

        log_printf("Created a window (%u, %dx%d, %s)\n",
                   SDL_GetWindowID(window),
                   video.window_w, video.window_h,
                   (f ? "fullscreen" : "windowed"));

        config_set_d(CONFIG_DISPLAY,    video_display());
        config_set_d(CONFIG_FULLSCREEN, f);
        config_set_d(CONFIG_WIDTH,      video.window_w);
        config_set_d(CONFIG_HEIGHT,     video.window_h);

        SDL_GL_SetSwapInterval(vsync);

        if (!glext_init())
            return 0;

        glViewport(0, 0, video.device_w, video.device_h);
        glClearColor(0.0f, 0.0f, 0.0f, 0.0f);

        glEnable(GL_NORMALIZE);
        glEnable(GL_CULL_FACE);
        glEnable(GL_DEPTH_TEST);
        glEnable(GL_TEXTURE_2D);
        glEnable(GL_LIGHTING);
        glEnable(GL_BLEND);

#if !ENABLE_OPENGLES
        glLightModeli(GL_LIGHT_MODEL_COLOR_CONTROL,
                      GL_SEPARATE_SPECULAR_COLOR);
#endif

        glPixelStorei(GL_PACK_ALIGNMENT, 1);
        glPixelStorei(GL_UNPACK_ALIGNMENT, 1);

        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
        glDepthFunc(GL_LEQUAL);

        /* If GL supports multisample, and SDL got a multisample buffer... */

        if (glext_check("ARB_multisample"))
        {
            SDL_GL_GetAttribute(SDL_GL_MULTISAMPLEBUFFERS, &buffers);
            if (buffers)
                glEnable(GL_MULTISAMPLE);
        }

        /* Set up HMD display if requested. */

        if (hmd)
            hmd_init();

        /* Initialize screen snapshotting. */

        snapshot_init();

        video_show_cursor();

        /* Grab input immediately in HMD mode. */

        if (hmd_stat())
            SDL_SetWindowGrab(window, SDL_TRUE);

        return 1;
    }

    /* If the mode failed, try it without stereo. */

    else if (stereo)
    {
        config_set_d(CONFIG_STEREO, 0);
        return video_mode(f, w, h);
    }

    /* If the mode failed, try decreasing the level of multisampling. */

    else if (buffers)
    {
        config_set_d(CONFIG_MULTISAMPLE, samples / 2);
        return video_mode(f, w, h);
    }

    /* If that mode failed, try it without reflections. */

    else if (stencil)
    {
        config_set_d(CONFIG_REFLECTION, 0);
        return video_mode(f, w, h);
    }

    /* If THAT mode failed, punt. */

    return 0;
}
/*
 * Window procedure for the video output child window.
 *
 * Handles OSD interaction, crop-rectangle dragging, and forwards
 * unconsumed mouse/keyboard input to the parent window.  Relies on
 * file-scope state: mouse_hold_count, crop_pos_x/y, show_crop_rect,
 * crop_rect_pen/brush, video_zoom_x/y, and the global vdata.
 *
 * NOTE(review): the static locals (mdown, v, mup_*) make this handler
 * single-window only — presumably only one video window ever exists;
 * confirm against window creation code.
 */
static LRESULT CALLBACK WndProc(HWND hwnd, UINT message, WPARAM wParam, LPARAM lParam)
{
    static int mdown = 0, v;                 /* left button held; last OSD hit result */
    static int mdown_x = 0, mdown_y = 0;     /* position where the left button went down */
    static int mup_x = 0, mup_y = 0;         /* crop position captured at last button-up */

    switch(message) {
    case WM_TIMER:
        /* Periodic (10 ms) timer set in WM_CREATE: repaint the video when
           audio is not driving playback, and auto-hide the OSD after the
           mouse has been idle.  mouse_hold_count: 0..100 counting up,
           >100 triggers hide, 200 = "already hidden" sentinel. */
        if(vdata.shared->audio.output.getplayerstate() != v_audio_playerstate_playing)
            video_display();
        if(mouse_hold_count == 200)
            break;
        if(mouse_hold_count > 100) {
            osd_hide_all();
            mouse_hold_count = 200;
        }else{
            mouse_hold_count++;
        }
        break;
    case WM_PAINT:
        videoout_refresh(0);
        break;
    case WM_CREATE:
        /* GDI objects for drawing the crop rectangle; freed in WM_DESTROY. */
        crop_rect_pen = CreatePen(PS_SOLID, 1, RGB(200, 200, 200));
        crop_rect_brush = (HBRUSH)GetStockObject(HOLLOW_BRUSH);
        /* Timer id 131, 10 ms period — drives the WM_TIMER logic above. */
        SetTimer(hwnd, 131, 10, 0);
        break;
    case WM_MOUSEMOVE:
        /* Any movement resets the OSD auto-hide countdown. */
        mouse_hold_count = 0;
        /* Give the OSD first refusal (3 = drag, 0 = hover). */
        if(mdown)
            v = osd_mouse_message(3, LOWORD(lParam), HIWORD(lParam));
        else
            v = osd_mouse_message(0, LOWORD(lParam), HIWORD(lParam));
        if(!v) {
            /* OSD did not consume it: either pan the zoomed crop region,
               or forward the move to the parent window. */
            if(mdown && (video_zoom_x > 1.0 || video_zoom_y > 1.0)) {
                crop_pos_x = mup_x - LOWORD(lParam) + mdown_x;
                crop_pos_y = mup_y - HIWORD(lParam) + mdown_y;
                show_crop_rect = 1;
            }else{
                SendMessage(GetParent(hwnd), message, wParam, lParam);
            }
        }
        break;
    case WM_LBUTTONDOWN:
        /* Begin a potential crop drag; 1 = OSD click. */
        mdown = 1;
        mdown_x = LOWORD(lParam);
        mdown_y = HIWORD(lParam);
        osd_mouse_message(1, LOWORD(lParam), HIWORD(lParam));
        break;
    case WM_RBUTTONDOWN:
        /* 2 = OSD right-click. */
        osd_mouse_message(2, LOWORD(lParam), HIWORD(lParam));
        break;
    case WM_LBUTTONUP:
        /* End of drag: remember the final crop offset for the next drag. */
        mdown = 0;
        mup_x = crop_pos_x;
        mup_y = crop_pos_y;
        show_crop_rect = 0;
        /* fallthrough — button-up is also forwarded to the parent below. */
    case WM_RBUTTONUP:
        {
            /* Translate child-window coordinates into the parent's frame
               before forwarding, so the parent sees consistent positions. */
            RECT rct, rctp;
            GetWindowRect(GetParent(hwnd), &rctp);
            GetWindowRect(hwnd, &rct);
            lParam = MAKELONG(LOWORD(lParam) + (rct.left - rctp.left), HIWORD(lParam) + (rct.top - rctp.top));
            SendMessage(GetParent(hwnd), message, wParam, lParam);
        }
        break;
    case WM_KEYDOWN:
    case WM_LBUTTONDBLCLK:
    case WM_RBUTTONDBLCLK:
        /* NOTE(review): for WM_KEYDOWN, lParam holds key flags, not
           coordinates — osd_mouse_message presumably just treats this as
           a generic "activate" probe; confirm intended behavior. */
        if(!osd_mouse_message(1, LOWORD(lParam), HIWORD(lParam))) {
            SendMessage(GetParent(hwnd), message, wParam, lParam);
        }
        break;
    case WM_DESTROY:
        /* Release the GDI objects created in WM_CREATE. */
        DeleteObject(crop_rect_pen);
        DeleteObject(crop_rect_brush);
        break;
    }
    /* All messages also get default processing (intentional: handled
       cases above rely on DefWindowProc for standard behavior too). */
    return DefWindowProc(hwnd, message, wParam, lParam);
}
int video_mode(int f, int w, int h) { int stereo = config_get_d(CONFIG_STEREO) ? 1 : 0; int stencil = config_get_d(CONFIG_REFLECTION) ? 1 : 0; int buffers = config_get_d(CONFIG_MULTISAMPLE) ? 1 : 0; int samples = config_get_d(CONFIG_MULTISAMPLE); int vsync = config_get_d(CONFIG_VSYNC) ? 1 : 0; int hmd = config_get_d(CONFIG_HMD) ? 1 : 0; int dpy = config_get_d(CONFIG_DISPLAY); int X = SDL_WINDOWPOS_CENTERED_DISPLAY(dpy); int Y = SDL_WINDOWPOS_CENTERED_DISPLAY(dpy); hmd_free(); if (window) { SDL_GL_DeleteContext(context); SDL_DestroyWindow(window); } SDL_GL_SetAttribute(SDL_GL_STEREO, stereo); SDL_GL_SetAttribute(SDL_GL_STENCIL_SIZE, stencil); SDL_GL_SetAttribute(SDL_GL_MULTISAMPLEBUFFERS, buffers); SDL_GL_SetAttribute(SDL_GL_MULTISAMPLESAMPLES, samples); /* Require 16-bit double buffer with 16-bit depth buffer. */ SDL_GL_SetAttribute(SDL_GL_RED_SIZE, 5); SDL_GL_SetAttribute(SDL_GL_GREEN_SIZE, 5); SDL_GL_SetAttribute(SDL_GL_BLUE_SIZE, 5); SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, 16); SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1); /* Try to set the currently specified mode. */ window = SDL_CreateWindow("", X, Y, w, h, SDL_WINDOW_OPENGL | (f ? 
SDL_WINDOW_FULLSCREEN_DESKTOP : 0)); if (window) { set_window_title(TITLE); set_window_icon(ICON); SDL_GetWindowSize(window, &w, &h); config_set_d(CONFIG_DISPLAY, video_display()); config_set_d(CONFIG_FULLSCREEN, f); config_set_d(CONFIG_WIDTH, w); config_set_d(CONFIG_HEIGHT, h); context = SDL_GL_CreateContext(window); SDL_GL_SetSwapInterval(vsync); if (!glext_init()) return 0; glViewport(0, 0, w, h); glClearColor(0.0f, 0.0f, 0.0f, 0.0f); glEnable(GL_NORMALIZE); glEnable(GL_CULL_FACE); glEnable(GL_DEPTH_TEST); glEnable(GL_TEXTURE_2D); glEnable(GL_LIGHTING); glEnable(GL_BLEND); #if !ENABLE_OPENGLES glLightModeli(GL_LIGHT_MODEL_COLOR_CONTROL, GL_SEPARATE_SPECULAR_COLOR); #endif glPixelStorei(GL_PACK_ALIGNMENT, 1); glPixelStorei(GL_UNPACK_ALIGNMENT, 1); glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); glDepthFunc(GL_LEQUAL); /* If GL supports multisample, and SDL got a multisample buffer... */ if (glext_check("ARB_multisample")) { SDL_GL_GetAttribute(SDL_GL_MULTISAMPLEBUFFERS, &buffers); if (buffers) glEnable(GL_MULTISAMPLE); } /* Set up HMD display if requested. */ if (hmd) hmd_init(); /* Initialize screen snapshotting. */ snapshot_init(); video_show_cursor(); /* Grab input immediately in HMD mode. */ if (hmd_stat()) SDL_SetWindowGrab(window, SDL_TRUE); return 1; } /* If the mode failed, try it without stereo. */ else if (stereo) { config_set_d(CONFIG_STEREO, 0); return video_mode(f, w, h); } /* If the mode failed, try decreasing the level of multisampling. */ else if (buffers) { config_set_d(CONFIG_MULTISAMPLE, samples / 2); return video_mode(f, w, h); } /* If that mode failed, try it without reflections. */ else if (stencil) { config_set_d(CONFIG_REFLECTION, 0); return video_mode(f, w, h); } /* If THAT mode failed, punt. */ return 0; }