static bool video_frame_scale(const void *data, unsigned width, unsigned height, size_t pitch) { driver_t *driver = driver_get_ptr(); RARCH_PERFORMANCE_INIT(video_frame_conv); if (!data) return false; if (video_driver_get_pixel_format() != RETRO_PIXEL_FORMAT_0RGB1555) return false; if (data == RETRO_HW_FRAME_BUFFER_VALID) return false; RARCH_PERFORMANCE_START(video_frame_conv); driver->scaler.in_width = width; driver->scaler.in_height = height; driver->scaler.out_width = width; driver->scaler.out_height = height; driver->scaler.in_stride = pitch; driver->scaler.out_stride = width * sizeof(uint16_t); scaler_ctx_scale(&driver->scaler, driver->scaler_out, data); RARCH_PERFORMANCE_STOP(video_frame_conv); return true; }
/* Scale one incoming video frame into handle->video.conv_frame,
 * using either libswscale or the built-in scaler context. */
static void ffmpeg_scale_input(ffmpeg_t *handle,
      const struct ffemu_video_data *data)
{
   /* When downscaling, bilinear sampling preserves more information
    * than point sampling. */
   bool downscale = handle->params.out_width  < data->width
                 || handle->params.out_height < data->height;

   if (handle->video.use_sws)
   {
      int in_stride = data->pitch;

      handle->video.sws = sws_getCachedContext(handle->video.sws,
            data->width, data->height, handle->video.in_pix_fmt,
            handle->params.out_width, handle->params.out_height,
            handle->video.pix_fmt,
            downscale ? SWS_BILINEAR : SWS_POINT,
            NULL, NULL, NULL);

      sws_scale(handle->video.sws,
            (const uint8_t* const*)&data->data, &in_stride,
            0, data->height,
            handle->video.conv_frame->data,
            handle->video.conv_frame->linesize);
      return;
   }

   /* Software path: regenerate the filter only when the input
    * geometry actually changed. */
   if ((int)data->width  != handle->video.scaler.in_width
         || (int)data->height != handle->video.scaler.in_height)
   {
      handle->video.scaler.in_width    = data->width;
      handle->video.scaler.in_height   = data->height;
      handle->video.scaler.in_stride   = data->pitch;

      handle->video.scaler.scaler_type = downscale
         ? SCALER_TYPE_BILINEAR : SCALER_TYPE_POINT;

      handle->video.scaler.out_width   = handle->params.out_width;
      handle->video.scaler.out_height  = handle->params.out_height;
      handle->video.scaler.out_stride  =
         handle->video.conv_frame->linesize[0];

      scaler_ctx_gen_filter(&handle->video.scaler);
   }

   scaler_ctx_scale(&handle->video.scaler,
         handle->video.conv_frame->data[0], data->data);
}
/* Convert a captured YUV buffer into the device's output buffer via the
 * pre-configured scaler context, timing the conversion with the
 * RARCH performance counters. */
static void process_image(video4linux_t *v4l, const uint8_t *buffer_yuv)
{
   RARCH_PERFORMANCE_INIT(yuv_convert_direct);
   RARCH_PERFORMANCE_START(yuv_convert_direct);

   scaler_ctx_scale(&v4l->scaler, v4l->buffer_output, buffer_yuv);

   RARCH_PERFORMANCE_STOP(yuv_convert_direct);
}
/* Encode and mux one video frame on the recording thread.
 * Duped frames re-encode the previous contents of conv_frame; fresh
 * frames are scaled in first. Returns false on encode/mux failure. */
static bool ffemu_push_video_thread(ffemu_t *handle,
      const struct ffemu_video_data *data)
{
   AVPacket pkt;

   if (!data->is_dupe)
   {
      if (data->width  != handle->video.scaler.in_width
            || data->height != handle->video.scaler.in_height)
      {
         /* Attempt to preserve more information if we scale down. */
         bool shrunk = handle->params.out_width  < data->width
                    || handle->params.out_height < data->height;

         handle->video.scaler.in_width    = data->width;
         handle->video.scaler.in_height   = data->height;
         handle->video.scaler.in_stride   = data->pitch;

         handle->video.scaler.scaler_type = shrunk
            ? SCALER_TYPE_BILINEAR : SCALER_TYPE_POINT;

         handle->video.scaler.out_width   = handle->params.out_width;
         handle->video.scaler.out_height  = handle->params.out_height;
         handle->video.scaler.out_stride  =
            handle->video.conv_frame->linesize[0];

         scaler_ctx_gen_filter(&handle->video.scaler);
      }

      scaler_ctx_scale(&handle->video.scaler,
            handle->video.conv_frame->data[0], data->data);
   }

   handle->video.conv_frame->pts = handle->video.frame_cnt;

   if (!encode_video(handle, &pkt, handle->video.conv_frame))
      return false;

   /* Short-circuit: only attempt to mux when the encoder emitted data. */
   if (pkt.size && av_interleaved_write_frame(handle->muxer.ctx, &pkt) < 0)
      return false;

   handle->video.frame_cnt++;
   return true;
}
/* Present one frame on the Switch: clear the 1280x720 backing image,
 * integer-scale the core frame into its center, overlay the menu or
 * statistics, then swizzle-blit into a dequeued surface buffer.
 * Returns false on scaler-generation or queue failure; a failed
 * dequeue just skips the frame and returns true. */
static bool switch_frame(void *data, const void *frame,
      unsigned width, unsigned height, uint64_t frame_count,
      unsigned pitch, const char *msg, video_frame_info_t *video_info)
{
   static uint64_t last_frame = 0;
   unsigned x, y;
   result_t r;
   uint32_t *out_buffer = NULL;
   switch_video_t *sw   = data;

   (void)frame_count;

   /* Clear image to opaque black. */
   for (y = 0; y < 720; y++)
      for (x = 0; x < 1280; x++)
         sw->image[y * 1280 + x] = 0xFF000000;

   if (width > 0 && height > 0)
   {
      /* Largest integer factor that fits the frame inside 1280x720.
       * These divisions MUST stay behind the size check above: the
       * previous code divided unconditionally and hit a division by
       * zero (undefined behavior) for a 0-sized frame. */
      int xsf     = 1280 / width;
      int ysf     = 720  / height;
      int sf      = (ysf < xsf) ? ysf : xsf;
      int tgtw    = width  * sf;
      int tgth    = height * sf;
      int centerx = (1280 - tgtw) / 2;
      int centery = (720  - tgth) / 2;

      /* Regenerate the scaler only when the source geometry changes. */
      if (sw->last_width != width || sw->last_height != height)
      {
         scaler_ctx_gen_reset(&sw->scaler);

         sw->scaler.in_width    = width;
         sw->scaler.in_height   = height;
         sw->scaler.in_stride   = pitch;
         sw->scaler.in_fmt      = sw->rgb32
            ? SCALER_FMT_ARGB8888 : SCALER_FMT_RGB565;
         sw->scaler.out_width   = tgtw;
         sw->scaler.out_height  = tgth;
         sw->scaler.out_stride  = 1280 * sizeof(uint32_t);
         sw->scaler.out_fmt     = SCALER_FMT_ABGR8888;
         sw->scaler.scaler_type = SCALER_TYPE_POINT;

         if (!scaler_ctx_gen_filter(&sw->scaler))
         {
            RARCH_ERR("failed to generate scaler for main image\n");
            return false;
         }

         sw->last_width  = width;
         sw->last_height = height;
      }

      /* Blit the scaled frame centered into the backing image. */
      scaler_ctx_scale(&sw->scaler,
            sw->image + (centery * 1280) + centerx, frame);
   }

#if defined(HAVE_MENU)
   if (sw->menu_texture.enable)
   {
      menu_driver_frame(video_info);

      if (sw->menu_texture.pixels)
      {
         /* Center the pre-scaled menu texture over the image. */
         scaler_ctx_scale(&sw->menu_texture.scaler,
               sw->image
                  + ((720  - sw->menu_texture.tgth) / 2) * 1280
                  + ((1280 - sw->menu_texture.tgtw) / 2),
               sw->menu_texture.pixels);
      }
   }
   else if (video_info->statistics_show)
   {
      struct font_params *osd_params =
         (struct font_params*)&video_info->osd_stat_params;

      if (osd_params)
         font_driver_render_msg(video_info, NULL, video_info->stat_text,
               (const struct font_params*)&video_info->osd_stat_params);
   }
#endif

   if (msg && strlen(msg) > 0)
      RARCH_LOG("message: %s\n", msg);

   r = surface_dequeue_buffer(&sw->surface, &out_buffer);

   if (sw->vsync)
      switch_wait_vsync(sw);

   svcSleepThread(10000);

   if (r != RESULT_OK)
      return true; /* Just skip the frame. */

   gfx_slow_swizzling_blit(out_buffer, sw->image, 1280, 720, 0, 0);

   r = surface_queue_buffer(&sw->surface);
   if (r != RESULT_OK)
      return false;

   last_frame = svcGetSystemTick();

   return true;
}