/**
 * Convert a picture to the requested output format, reusing a cached
 * conversion filter when possible.
 *
 * Unset fields of \p p_fmt_out (chroma, size, SAR) are filled in from
 * \p p_fmt_in; if only the SAR differs, the output width is derived so the
 * display aspect is preserved.
 *
 * \return a new picture reference, or NULL on failure. \p p_pic keeps its
 *         caller's reference (an extra one is held for the filter).
 */
static picture_t *ImageConvert( image_handler_t *p_image, picture_t *p_pic,
                                video_format_t *p_fmt_in,
                                video_format_t *p_fmt_out )
{
    picture_t *p_pif;

    /* No explicit size but a different sample aspect ratio requested:
     * recompute the width so the display aspect stays the same.
     * 64-bit intermediate avoids overflow of the products. */
    if( !p_fmt_out->i_width && !p_fmt_out->i_height &&
        p_fmt_out->i_sar_num && p_fmt_out->i_sar_den &&
        p_fmt_out->i_sar_num * p_fmt_in->i_sar_den !=
        p_fmt_out->i_sar_den * p_fmt_in->i_sar_num )
    {
        p_fmt_out->i_width =
            p_fmt_in->i_sar_num * (int64_t)p_fmt_out->i_sar_den *
            p_fmt_in->i_width / p_fmt_in->i_sar_den / p_fmt_out->i_sar_num;
        p_fmt_out->i_visible_width =
            p_fmt_in->i_sar_num * (int64_t)p_fmt_out->i_sar_den *
            p_fmt_in->i_visible_width / p_fmt_in->i_sar_den /
            p_fmt_out->i_sar_num;
    }

    /* Unspecified output fields default to the input format */
    if( !p_fmt_out->i_chroma ) p_fmt_out->i_chroma = p_fmt_in->i_chroma;
    if( !p_fmt_out->i_width )
        p_fmt_out->i_width = p_fmt_out->i_visible_width = p_fmt_in->i_width;
    if( !p_fmt_out->i_height )
        p_fmt_out->i_height = p_fmt_out->i_visible_height = p_fmt_in->i_height;
    if( !p_fmt_out->i_sar_num ) p_fmt_out->i_sar_num = p_fmt_in->i_sar_num;
    if( !p_fmt_out->i_sar_den ) p_fmt_out->i_sar_den = p_fmt_in->i_sar_den;

    /* A chroma change invalidates the cached filter */
    if( p_image->p_filter )
    if( p_image->p_filter->fmt_in.video.i_chroma != p_fmt_in->i_chroma ||
        p_image->p_filter->fmt_out.video.i_chroma != p_fmt_out->i_chroma )
    {
        /* We need to restart a new filter */
        DeleteFilter( p_image->p_filter );
        p_image->p_filter = NULL;
    }

    /* Start a filter */
    if( !p_image->p_filter )
    {
        es_format_t fmt_in;
        es_format_Init( &fmt_in, VIDEO_ES, p_fmt_in->i_chroma );
        fmt_in.video = *p_fmt_in;

        p_image->p_filter =
            CreateFilter( p_image->p_parent, &fmt_in, p_fmt_out, NULL );

        if( !p_image->p_filter )
        {
            return NULL;
        }
    }
    else
    {
        /* Filters should handle on-the-fly size changes */
        p_image->p_filter->fmt_in.video = *p_fmt_in;
        p_image->p_filter->fmt_out.video = *p_fmt_out;
    }

    picture_Hold( p_pic );

    p_pif = p_image->p_filter->pf_video_filter( p_image->p_filter, p_pic );

    /* FIX: pf_video_filter() may fail and return NULL; the original called
     * picture_Release( p_pif ) unconditionally in the branch below. */
    if( p_pif &&
        p_fmt_in->i_chroma == p_fmt_out->i_chroma &&
        p_fmt_in->i_width  == p_fmt_out->i_width &&
        p_fmt_in->i_height == p_fmt_out->i_height )
    {
        /* Formats are identical: hand back a fresh copy instead of the
         * filter's passthrough output. Duplicate image. */
        picture_Release( p_pif ); /* XXX: Better fix must be possible */
        p_pif = p_image->p_filter->pf_video_buffer_new( p_image->p_filter );
        if( p_pif ) picture_Copy( p_pif, p_pic );
    }

    return p_pif;
}
/**
 * Destructor for pictures allocated on behalf of the filter: the filter
 * object itself is not needed, only the picture reference is dropped.
 */
static void VideoBufferDelete( filter_t *p_filter, picture_t *p_picture )
{
    (void) p_filter; /* unused */
    picture_Release( p_picture );
}
/**
 * Open the Android native-window video output.
 *
 * Accepts only VLC_CODEC_ANDROID_OPAQUE pictures. Builds a POOL_SIZE-entry
 * picture pool whose pictures are locked/unlocked through the native window
 * surface, plus an RGBA picture used to render subtitles.
 *
 * \return VLC_SUCCESS, VLC_EGENERIC (wrong chroma / no native API),
 *         or VLC_ENOMEM.
 */
static int Open(vlc_object_t *p_this)
{
    vout_display_t *vd = (vout_display_t*)p_this;
    /* Hoisted and NULL-initialized so the error path is always well defined */
    picture_t **pictures = NULL;

    video_format_t fmt = vd->fmt;
    if (fmt.i_chroma != VLC_CODEC_ANDROID_OPAQUE)
        return VLC_EGENERIC;

    /* Allocate structure */
    vout_display_sys_t *sys = (struct vout_display_sys_t*)calloc(1, sizeof(*sys));
    if (!sys)
        return VLC_ENOMEM;

    sys->p_library = LoadNativeWindowAPI(&sys->native_window);
    if (!sys->p_library) {
        free(sys);
        msg_Err(vd, "Could not initialize NativeWindow API.");
        return VLC_EGENERIC;
    }
    sys->fmt = fmt;

    /* FIX: publish sys before anything can "goto error", so that Close()
     * can see and release what was created so far. The original only set
     * vd->sys on the success path, so the error path leaked sys and its
     * contents. NOTE(review): assumes Close() tolerates the calloc-zeroed
     * (partially initialized) sys — confirm against Close(). */
    vd->sys = sys;

    video_format_t subpicture_format = sys->fmt;
    subpicture_format.i_chroma = VLC_CODEC_RGBA;
    /* Create a RGBA picture for rendering subtitles. */
    sys->subtitles_picture = picture_NewFromFormat(&subpicture_format);
    if (!sys->subtitles_picture) /* FIX: was used unchecked */
        goto error;

    /* Export the subpicture capability of this vout. */
    vd->info.subpicture_chromas = subpicture_chromas;

    int i_pictures = POOL_SIZE;
    /* FIX: calloc() takes (count, element-size) in that order */
    pictures = calloc(i_pictures, sizeof(*pictures));
    if (!pictures)
        goto error;
    for (int i = 0; i < i_pictures; i++) {
        picture_sys_t *p_picsys = calloc(1, sizeof(*p_picsys));
        if (unlikely(p_picsys == NULL))
            goto error;
        picture_resource_t resource = { .p_sys = p_picsys };
        picture_t *picture = picture_NewFromResource(&fmt, &resource);
        if (!picture) {
            free(p_picsys);
            goto error;
        }
        pictures[i] = picture;
    }

    /* Wrap it into a picture pool */
    picture_pool_configuration_t pool_cfg;
    memset(&pool_cfg, 0, sizeof(pool_cfg));
    pool_cfg.picture_count = i_pictures;
    pool_cfg.picture       = pictures;
    pool_cfg.lock          = LockSurface;
    pool_cfg.unlock        = UnlockSurface;

    sys->pool = picture_pool_NewExtended(&pool_cfg);
    if (!sys->pool)
        goto error; /* the pictures are released by the error path */

    /* Setup vout_display */
    vd->fmt     = fmt;
    vd->pool    = Pool;
    vd->display = Display;
    vd->control = Control;
    vd->prepare = NULL;
    vd->manage  = Manage;

    /* Fix initial state */
    vout_display_SendEventFullscreen(vd, false);

    /* FIX: the pool holds its own references; the pointer array itself was
     * leaked on the success path. */
    free(pictures);
    return VLC_SUCCESS;

error:
    if (pictures) {
        /* FIX: release the pictures created so far (the array is zeroed by
         * calloc, so never-created slots are NULL), not just the array. */
        for (int i = 0; i < POOL_SIZE; i++)
            if (pictures[i])
                picture_Release(pictures[i]);
        free(pictures);
    }
    Close(p_this);
    return VLC_ENOMEM;
}
/**
 * Tear down the mosaic-bridge elementary stream: release the decoder,
 * the user video filter chain, the queued pictures, and — when this was
 * the last non-empty ES — the shared bridge structure itself.
 *
 * \return VLC_SUCCESS always.
 */
static int Del( sout_stream_t *p_stream, sout_stream_id_t *id )
{
    VLC_UNUSED(id);
    sout_stream_sys_t *p_sys = p_stream->p_sys;
    bridge_t *p_bridge;
    bridged_es_t *p_es;
    bool b_last_es = true;
    int i;

    /* Nothing was set up; nothing to undo */
    if( !p_sys->b_inited )
        return VLC_SUCCESS;

    if( p_sys->p_decoder != NULL )
    {
        /* Keep the owner pointer aside: it must be freed only after the
         * decoder object itself has been released. */
        decoder_owner_sys_t *p_owner = p_sys->p_decoder->p_owner;

        if( p_sys->p_decoder->p_module )
            module_unneed( p_sys->p_decoder, p_sys->p_decoder->p_module );
        if( p_sys->p_decoder->p_description )
            vlc_meta_Delete( p_sys->p_decoder->p_description );
        vlc_object_release( p_sys->p_decoder );

        free( p_owner );
    }

    /* Destroy user specified video filters */
    if( p_sys->p_vf2 )
        filter_chain_Delete( p_sys->p_vf2 );

    /* The bridge is shared between streams: all access is under the
     * process-wide mosaic lock. */
    vlc_global_lock( VLC_MOSAIC_MUTEX );

    p_bridge = GetBridge( p_stream );
    p_es = p_sys->p_es;

    p_es->b_empty = true;
    /* Drop every picture still queued on this ES */
    while ( p_es->p_picture )
    {
        picture_t *p_next = p_es->p_picture->p_next;
        picture_Release( p_es->p_picture );
        p_es->p_picture = p_next;
    }

    /* Check whether any other ES of the bridge is still in use */
    for ( i = 0; i < p_bridge->i_es_num; i++ )
    {
        if ( !p_bridge->pp_es[i]->b_empty )
        {
            b_last_es = false;
            break;
        }
    }

    /* Last user of the bridge: destroy it and the libvlc variable that
     * publishes it ("mosaic-struct"). */
    if ( b_last_es )
    {
        vlc_object_t *p_libvlc = VLC_OBJECT( p_stream->p_libvlc );
        for ( i = 0; i < p_bridge->i_es_num; i++ )
            free( p_bridge->pp_es[i] );
        free( p_bridge->pp_es );
        free( p_bridge );
        var_Destroy( p_libvlc, "mosaic-struct" );
    }

    vlc_global_unlock( VLC_MOSAIC_MUTEX );

    if ( p_sys->p_image )
    {
        image_HandlerDelete( p_sys->p_image );
    }

    p_sys->b_inited = false;

    return VLC_SUCCESS;
}
/**
 * Picture destructor used by the filter: simply drop the reference.
 * The filter argument is part of the callback signature only.
 */
inline static void video_del_buffer_filter( filter_t *p_this,
                                            picture_t *p_pic )
{
    (void) p_this;
    picture_Release( p_pic );
}
/*****************************************************************************
 * Render: displays previously rendered output
 *****************************************************************************
 * Invert filter callback: produces a new picture whose pixel bytes are the
 * bitwise complement of the input's. For YUVA input the alpha plane is
 * copied through untouched. The input picture reference is consumed.
 *****************************************************************************/
static picture_t *Filter( filter_t *p_filter, picture_t *p_pic )
{
    picture_t *p_outpic;
    int i_index;
    int i_planes;

    if( !p_pic ) return NULL;

    p_outpic = filter_NewPicture( p_filter );
    if( !p_outpic )
    {
        msg_Warn( p_filter, "can't get output picture" );
        picture_Release( p_pic );
        return NULL;
    }

    if( p_pic->format.i_chroma == VLC_CODEC_YUVA )
    {
        /* We don't want to invert the alpha plane */
        i_planes = p_pic->i_planes - 1;
        /* Full-pitch copy (including padding) of the alpha plane */
        memcpy( p_outpic->p[A_PLANE].p_pixels, p_pic->p[A_PLANE].p_pixels,
                p_pic->p[A_PLANE].i_pitch * p_pic->p[A_PLANE].i_lines );
    }
    else
    {
        i_planes = p_pic->i_planes;
    }

    for( i_index = 0 ; i_index < i_planes ; i_index++ )
    {
        uint8_t *p_in, *p_in_end, *p_line_end, *p_out;

        p_in = p_pic->p[i_index].p_pixels;
        p_in_end = p_in + p_pic->p[i_index].i_visible_lines
                          * p_pic->p[i_index].i_pitch;

        p_out = p_outpic->p[i_index].p_pixels;

        for( ; p_in < p_in_end ; )
        {
            uint64_t *p_in64, *p_out64;

            /* Stop the wide loop 64 bytes before the end of the visible
             * line; the remainder is handled byte-by-byte below. */
            p_line_end = p_in + p_pic->p[i_index].i_visible_pitch - 64;

            /* NOTE(review): these casts assume the plane pointers are
             * suitably aligned for 64-bit loads — confirm for all chromas. */
            p_in64 = (uint64_t*)p_in;
            p_out64 = (uint64_t*)p_out;

            while( p_in64 < (uint64_t *)p_line_end )
            {
                /* Invert 64 bytes (8 x uint64_t) per iteration */
                *p_out64++ = ~*p_in64++;
                *p_out64++ = ~*p_in64++;
                *p_out64++ = ~*p_in64++;
                *p_out64++ = ~*p_in64++;
                *p_out64++ = ~*p_in64++;
                *p_out64++ = ~*p_in64++;
                *p_out64++ = ~*p_in64++;
                *p_out64++ = ~*p_in64++;
            }

            p_in = (uint8_t*)p_in64;
            p_out = (uint8_t*)p_out64;
            /* Restore the real end of line for the scalar tail loop */
            p_line_end += 64;

            for( ; p_in < p_line_end ; )
            {
                *p_out++ = ~( *p_in++ );
            }

            /* Skip padding between visible pitch and allocated pitch */
            p_in += p_pic->p[i_index].i_pitch
                    - p_pic->p[i_index].i_visible_pitch;
            p_out += p_outpic->p[i_index].i_pitch
                     - p_outpic->p[i_index].i_visible_pitch;
        }
    }

    return CopyInfoAndRelease( p_outpic, p_pic );
}
/*****************************************************************************
 * Run the filter on a Packed YUV picture
 *****************************************************************************
 * Applies brightness/contrast/gamma (via lookup tables) to the Y samples
 * and hue/saturation to the U/V samples of a packed-YUV picture. In
 * brightness-threshold mode, luma is binarized and saturation zeroed.
 * Consumes the input picture reference; returns a new picture or NULL.
 *****************************************************************************/
static picture_t *FilterPacked( filter_t *p_filter, picture_t *p_pic )
{
    int pi_luma[256];
    int pi_gamma[256];

    picture_t *p_outpic;
    uint8_t *p_in, *p_in_end, *p_line_end;
    uint8_t *p_out;
    int i_y_offset, i_u_offset, i_v_offset;

    int i_pitch, i_visible_pitch;

    bool b_thres;
    double f_hue;
    double f_gamma;
    int32_t i_cont, i_lum;
    int i_sat, i_sin, i_cos, i_x, i_y;
    int i;

    filter_sys_t *p_sys = p_filter->p_sys;

    if( !p_pic ) return NULL;

    i_pitch = p_pic->p->i_pitch;
    i_visible_pitch = p_pic->p->i_visible_pitch;

    if( GetPackedYuvOffsets( p_pic->format.i_chroma, &i_y_offset,
                             &i_u_offset, &i_v_offset ) != VLC_SUCCESS )
    {
        msg_Warn( p_filter, "Unsupported input chroma (%4.4s)",
                  (char*)&(p_pic->format.i_chroma) );
        picture_Release( p_pic );
        return NULL;
    }

    p_outpic = filter_NewPicture( p_filter );
    if( !p_outpic )
    {
        msg_Warn( p_filter, "can't get output picture" );
        picture_Release( p_pic );
        return NULL;
    }

    /* Get variables (snapshot under the lock so a concurrent settings
     * change cannot mix old and new values) */
    vlc_mutex_lock( &p_sys->lock );
    i_cont = (int)( p_sys->f_contrast * 255 );
    i_lum = (int)( (p_sys->f_brightness - 1.0)*255 );
    f_hue = p_sys->f_hue * (float)(M_PI / 180.);
    i_sat = (int)( p_sys->f_saturation * 256 );
    f_gamma = 1.0 / p_sys->f_gamma;
    b_thres = p_sys->b_brightness_threshold;
    vlc_mutex_unlock( &p_sys->lock );

    /*
     * Threshold mode drops out everything about luma, contrast and gamma.
     */
    if( !b_thres )
    {
        /* Contrast is a fast but kludged function, so I put this gap to be
         * cleaner :) */
        i_lum += 128 - i_cont / 2;

        /* Fill the gamma lookup table */
        for( i = 0 ; i < 256 ; i++ )
        {
            pi_gamma[ i ] = clip_uint8_vlc( pow(i / 255.0, f_gamma) * 255.0);
        }

        /* Fill the luma lookup table */
        for( i = 0 ; i < 256 ; i++ )
        {
            pi_luma[ i ] = pi_gamma[clip_uint8_vlc( i_lum + i_cont * i / 256)];
        }
    }
    else
    {
        /*
         * We get luma as threshold value: the higher it is, the darker is
         * the image. Should I reverse this?
         */
        for( i = 0 ; i < 256 ; i++ )
        {
            pi_luma[ i ] = (i < i_lum) ? 0 : 255;
        }

        /*
         * Desaturates image to avoid that strange yellow halo...
         */
        i_sat = 0;
    }

    /*
     * Do the Y plane: every luma byte is i_y_offset into each 2-byte
     * packed sample, hence the stride of 2 below.
     */
    p_in = p_pic->p->p_pixels + i_y_offset;
    /* Stop the unrolled loop 8*4 bytes early; the tail loop finishes up */
    p_in_end = p_in + p_pic->p->i_visible_lines * p_pic->p->i_pitch - 8 * 4;
    p_out = p_outpic->p->p_pixels + i_y_offset;

    for( ; p_in < p_in_end ; )
    {
        p_line_end = p_in + i_visible_pitch - 8 * 4;

        for( ; p_in < p_line_end ; )
        {
            /* Do 8 pixels at a time */
            *p_out = pi_luma[ *p_in ]; p_in += 2; p_out += 2;
            *p_out = pi_luma[ *p_in ]; p_in += 2; p_out += 2;
            *p_out = pi_luma[ *p_in ]; p_in += 2; p_out += 2;
            *p_out = pi_luma[ *p_in ]; p_in += 2; p_out += 2;
            *p_out = pi_luma[ *p_in ]; p_in += 2; p_out += 2;
            *p_out = pi_luma[ *p_in ]; p_in += 2; p_out += 2;
            *p_out = pi_luma[ *p_in ]; p_in += 2; p_out += 2;
            *p_out = pi_luma[ *p_in ]; p_in += 2; p_out += 2;
        }

        p_line_end += 8 * 4;

        for( ; p_in < p_line_end ; )
        {
            *p_out = pi_luma[ *p_in ]; p_in += 2; p_out += 2;
        }

        /* Skip line padding */
        p_in += i_pitch - p_pic->p->i_visible_pitch;
        p_out += i_pitch - p_outpic->p->i_visible_pitch;
    }

    /*
     * Do the U and V planes (fixed-point hue rotation / saturation)
     */
    i_sin = sin(f_hue) * 256;
    i_cos = cos(f_hue) * 256;

    i_x = ( cos(f_hue) + sin(f_hue) ) * 32768;
    i_y = ( cos(f_hue) - sin(f_hue) ) * 32768;

    if ( i_sat > 256 )
    {
        /* Over-saturation needs the clipping variant */
        if ( p_sys->pf_process_sat_hue_clip( p_pic, p_outpic, i_sin, i_cos,
                                             i_sat, i_x, i_y ) != VLC_SUCCESS )
        {
            /* Currently only one error can happen in the function, but if there
             * will be more of them, this message must go away */
            msg_Warn( p_filter, "Unsupported input chroma (%4.4s)",
                      (char*)&(p_pic->format.i_chroma) );
            /* FIX: the output picture was leaked on this error path */
            picture_Release( p_outpic );
            picture_Release( p_pic );
            return NULL;
        }
    }
    else
    {
        if ( p_sys->pf_process_sat_hue( p_pic, p_outpic, i_sin, i_cos,
                                        i_sat, i_x, i_y ) != VLC_SUCCESS )
        {
            /* Currently only one error can happen in the function, but if there
             * will be more of them, this message must go away */
            msg_Warn( p_filter, "Unsupported input chroma (%4.4s)",
                      (char*)&(p_pic->format.i_chroma) );
            /* FIX: the output picture was leaked on this error path */
            picture_Release( p_outpic );
            picture_Release( p_pic );
            return NULL;
        }
    }

    return CopyInfoAndRelease( p_outpic, p_pic );
}
/*****************************************************************************
 * Command functions
 *****************************************************************************/
/**
 * Load overlay data (text or a picture) from a SysV shared-memory segment
 * identified by p_params->i_shmid into the overlay p_params->i_id.
 *
 * \return VLC_SUCCESS, VLC_EGENERIC on bad parameters / missing shm,
 *         VLC_ENOMEM on allocation or attach failure.
 */
static int exec_DataSharedMem( filter_t *p_filter,
                               const commandparams_t *p_params,
                               commandparams_t *p_results )
{
#if defined(HAVE_SYS_SHM_H)
    filter_sys_t *p_sys = (filter_sys_t*) p_filter->p_sys;
    struct shmid_ds shminfo;
    overlay_t *p_ovl;
    size_t i_size;

    VLC_UNUSED(p_results);

    p_ovl = ListGet( &p_sys->overlays, p_params->i_id );
    if( p_ovl == NULL )
    {
        msg_Err( p_filter, "Invalid overlay: %d", p_params->i_id );
        return VLC_EGENERIC;
    }

    if( shmctl( p_params->i_shmid, IPC_STAT, &shminfo ) == -1 )
    {
        msg_Err( p_filter, "Unable to access shared memory" );
        return VLC_EGENERIC;
    }
    i_size = shminfo.shm_segsz;

    if( p_params->fourcc == VLC_CODEC_TEXT )
    {
        char *p_data;

        if( (p_params->i_height != 1) || (p_params->i_width < 1) )
        {
            msg_Err( p_filter,
                     "Invalid width and/or height. when specifying text height "
                     "must be 1 and width the number of bytes in the string, "
                     "including the null terminator" );
            return VLC_EGENERIC;
        }

        if( (size_t)p_params->i_width > i_size )
        {
            msg_Err( p_filter,
                     "Insufficient data in shared memory. need %d, got %zu",
                     p_params->i_width, i_size );
            return VLC_EGENERIC;
        }

        p_ovl->data.p_text = malloc( p_params->i_width );
        if( p_ovl->data.p_text == NULL )
        {
            msg_Err( p_filter, "Unable to allocate string storage" );
            return VLC_ENOMEM;
        }

        video_format_Setup( &p_ovl->format, VLC_CODEC_TEXT, 0, 0, 0, 1 );

        p_data = shmat( p_params->i_shmid, NULL, SHM_RDONLY );
        /* FIX: shmat() signals failure with (void *)-1, not NULL (POSIX) */
        if( p_data == (void *)-1 )
        {
            msg_Err( p_filter, "Unable to attach to shared memory" );
            free( p_ovl->data.p_text );
            p_ovl->data.p_text = NULL;
            return VLC_ENOMEM;
        }
        memcpy( p_ovl->data.p_text, p_data, p_params->i_width );

        shmdt( p_data );
    }
    else
    {
        uint8_t *p_data, *p_in;
        size_t i_neededsize = 0;

        p_ovl->data.p_pic = picture_New( p_params->fourcc,
                                         p_params->i_width, p_params->i_height,
                                         1, 1 );
        if( p_ovl->data.p_pic == NULL )
            return VLC_ENOMEM;

        p_ovl->format = p_ovl->data.p_pic->format;

        /* Sum the visible bytes of every plane to validate the segment size */
        for( size_t i_plane = 0; i_plane <
             (size_t)p_ovl->data.p_pic->i_planes; ++i_plane )
        {
            i_neededsize += p_ovl->data.p_pic->p[i_plane].i_visible_lines *
                            p_ovl->data.p_pic->p[i_plane].i_visible_pitch;
        }

        if( i_neededsize > i_size )
        {
            msg_Err( p_filter,
                     "Insufficient data in shared memory. need %zu, got %zu",
                     i_neededsize, i_size );
            picture_Release( p_ovl->data.p_pic );
            p_ovl->data.p_pic = NULL;
            return VLC_EGENERIC;
        }

        p_data = shmat( p_params->i_shmid, NULL, SHM_RDONLY );
        /* FIX: shmat() signals failure with (void *)-1, not NULL (POSIX) */
        if( p_data == (void *)-1 )
        {
            msg_Err( p_filter, "Unable to attach to shared memory" );
            picture_Release( p_ovl->data.p_pic );
            p_ovl->data.p_pic = NULL;
            return VLC_ENOMEM;
        }

        /* Copy plane by plane, honoring the destination pitch */
        p_in = p_data;
        for( size_t i_plane = 0; i_plane <
             (size_t)p_ovl->data.p_pic->i_planes; ++i_plane )
        {
            uint8_t *p_out = p_ovl->data.p_pic->p[i_plane].p_pixels;
            for( size_t i_line = 0; i_line <
                 (size_t)p_ovl->data.p_pic->p[i_plane].i_visible_lines;
                 ++i_line )
            {
                vlc_memcpy( p_out, p_in,
                            p_ovl->data.p_pic->p[i_plane].i_visible_pitch );
                p_out += p_ovl->data.p_pic->p[i_plane].i_pitch;
                p_in += p_ovl->data.p_pic->p[i_plane].i_visible_pitch;
            }
        }
        shmdt( p_data );
    }
    p_sys->b_updated = p_ovl->b_active;

    return VLC_SUCCESS;
#else
    VLC_UNUSED(p_params);
    VLC_UNUSED(p_results);

    msg_Err( p_filter, "system doesn't support shared memory" );
    return VLC_EGENERIC;
#endif
}
/**
 * Return a direct buffer
 *
 * Lazily builds the display's picture pool: resizes the X window to the
 * placed picture rectangle, probes the picture geometry once via a throwaway
 * picture, then allocates up to MAX_PICTURES XCB (optionally MIT-SHM backed)
 * pictures. The pool is cached in sys->pool; requested_count is ignored.
 */
static picture_pool_t *Pool (vout_display_t *vd, unsigned requested_count)
{
    vout_display_sys_t *sys = vd->sys;
    (void)requested_count;
    if (sys->pool)
        return sys->pool;

    vout_display_place_t place;

    vout_display_PlacePicture (&place, &vd->source, vd->cfg, false);

    /* Move/resize the window to the computed placement */
    const uint32_t values[] = { place.x, place.y, place.width, place.height };
    xcb_configure_window (sys->conn, sys->window,
                          XCB_CONFIG_WINDOW_X | XCB_CONFIG_WINDOW_Y |
                          XCB_CONFIG_WINDOW_WIDTH | XCB_CONFIG_WINDOW_HEIGHT,
                          values);

    /* Throwaway picture, only used to learn the plane geometry */
    picture_t *pic = picture_NewFromFormat (&vd->fmt);
    if (!pic)
        return NULL;

    assert (pic->i_planes == 1);

    picture_resource_t res = {
        .p = {
            [0] = {
                .i_lines = pic->p->i_lines,
                .i_pitch = pic->p->i_pitch,
            },
        },
    };
    picture_Release (pic);

    unsigned count;
    picture_t *pic_array[MAX_PICTURES];
    const size_t size = res.p->i_pitch * res.p->i_lines;
    for (count = 0; count < MAX_PICTURES; count++)
    {
        /* seg_base == 0 means MIT-SHM is unavailable; allocate plain */
        xcb_shm_seg_t seg = (sys->seg_base != 0) ? (sys->seg_base + count) : 0;

        if (XCB_picture_Alloc (vd, &res, size, sys->conn, seg))
            break;
        pic_array[count] = XCB_picture_NewFromResource (&vd->fmt, &res);
        if (unlikely(pic_array[count] == NULL))
        {
            /* Undo the server-side attach for this failed picture */
            if (seg != 0)
                xcb_shm_detach (sys->conn, seg);
            break;
        }
    }
    xcb_flush (sys->conn);

    if (count == 0)
        return NULL;

    sys->pool = picture_pool_New (count, pic_array);
    /* On pool failure, give the picture references back */
    if (unlikely(sys->pool == NULL))
        while (count > 0)
            picture_Release(pic_array[--count]);
    return sys->pool;
}
/**
 * VNC worker thread: connects and handshakes with the VNC host, allocates
 * the YUVA picture the remote framebuffer is decoded into, spawns the
 * periodic update-request thread, then loops reading and dispatching
 * server-to-client messages until the object is killed or an error occurs.
 * Cleans up socket, picture, and state flags on exit.
 */
static void* vnc_worker_thread( vlc_object_t *p_thread_obj )
{
    filter_t* p_filter = (filter_t*)(p_thread_obj->p_parent);
    filter_sys_t *p_sys = p_filter->p_sys;
    vlc_object_t *p_update_request_thread;
    int canc = vlc_savecancel ();

    msg_Dbg( p_filter, "VNC worker thread started" );

    if( !open_vnc_connection ( p_filter ) )
    {
        msg_Err( p_filter, "Could not connect to vnc host" );
        goto exit;
    }

    if( !handshaking ( p_filter ) )
    {
        msg_Err( p_filter, "Error occured while handshaking vnc host" );
        goto exit;
    }

    p_sys->b_connection_active = true; /* to enable sending key
                                        * and mouse events to host */

    /* Create an empty picture for VNC the data */
    vlc_mutex_lock( &p_sys->lock );
    p_sys->p_pic = picture_New( VLC_CODEC_YUVA,
                                p_sys->i_vnc_width, p_sys->i_vnc_height, 1, 1 );
    if( !p_sys->p_pic )
    {
        vlc_mutex_unlock( &p_sys->lock );
        goto exit;
    }
    p_sys->i_vnc_pixels = p_sys->i_vnc_width * p_sys->i_vnc_height;

    vlc_mutex_unlock( &p_sys->lock );

    /* create the update request thread */
    p_update_request_thread = vlc_object_create( p_filter,
                                                 sizeof( vlc_object_t ) );
    vlc_object_attach( p_update_request_thread, p_filter );
    if( vlc_thread_create( p_update_request_thread,
                           update_request_thread, VLC_THREAD_PRIORITY_LOW ) )
    {
        vlc_object_release( p_update_request_thread );
        msg_Err( p_filter, "cannot spawn vnc update request thread" );
        goto exit;
    }

    /* connection is initialized, now read and handle server messages */
    while( vlc_object_alive( p_thread_obj ) )
    {
        rfbServerToClientMsg msg;
        int i_msgSize;

        memset( &msg, 0, sizeof(msg) );

        /* Read the one-byte message type first to learn the header size */
        if( !read_exact(p_filter, p_sys->i_socket, (char*)&msg, 1 ) )
        {
            msg_Err( p_filter, "Error while waiting for next server message");
            break;
        }
        switch (msg.type)
        {
        case rfbFramebufferUpdate:
            i_msgSize = sz_rfbFramebufferUpdateMsg;
            break;
        case rfbSetColourMapEntries:
            i_msgSize = sz_rfbSetColourMapEntriesMsg;
            break;
        case rfbBell:
            i_msgSize = sz_rfbBellMsg;
            break;
        case rfbServerCutText:
            i_msgSize = sz_rfbServerCutTextMsg;
            break;
        case rfbReSizeFrameBuffer:
            i_msgSize = sz_rfbReSizeFrameBufferMsg;
            break;
        default:
            i_msgSize = 0;
            msg_Err( p_filter, "Invalid message %u received", msg.type );
            break;
        }

        if( i_msgSize <= 0 )
            break;

        /* Read the rest of the fixed-size header (type byte already read) */
        if( --i_msgSize > 0 )
        {
            if ( !read_exact( p_filter, p_sys->i_socket,
                              ((char*)&msg)+1, i_msgSize ) )
            {
                msg_Err( p_filter, "Error while reading message of type %u",
                         msg.type );
                break;
            }
        }
        process_server_message( p_filter, &msg);
    }

    msg_Dbg( p_filter, "joining update_request_thread" );
    vlc_object_kill( p_update_request_thread );
    vlc_thread_join( p_update_request_thread );
    vlc_object_release( p_update_request_thread );
    msg_Dbg( p_filter, "released update_request_thread" );

exit:
    /* Tear down shared state under the lock so the filter callback never
     * sees a half-closed connection */
    vlc_mutex_lock( &p_sys->lock );
    p_sys->b_connection_active = false;

    if (p_sys->i_socket >= 0)
        net_Close(p_sys->i_socket);

    if( p_sys->p_pic )
        picture_Release( p_sys->p_pic );

    /* It will hide the subtitle */
    p_sys->b_continue = false;
    p_sys->b_need_update = true;
    vlc_mutex_unlock( &p_sys->lock );

    msg_Dbg( p_filter, "VNC message reader thread ended" );
    vlc_restorecancel (canc);
    return NULL;
}
/**
 * Open the OpenMAX IL "iv_renderer" video output.
 *
 * Loads the first matching OMX component, configures its input port for
 * I420-like planar video, allocates the port buffers, drives the component
 * through Idle to Executing, and wraps one picture per OMX buffer into a
 * picture pool.
 *
 * \return VLC_SUCCESS or VLC_EGENERIC/VLC_ENOMEM on failure.
 */
static int Open(vlc_object_t *p_this)
{
    vout_display_t *vd = (vout_display_t *)p_this;
    /* Alias kept: CHECK_ERROR() reports through p_dec */
    vout_display_t *p_dec = vd;
    char ppsz_components[MAX_COMPONENTS_LIST_SIZE][OMX_MAX_STRINGNAME_SIZE];
    picture_t** pictures = NULL;
    OMX_PARAM_PORTDEFINITIONTYPE *def;

    static OMX_CALLBACKTYPE callbacks =
        { OmxEventHandler, OmxEmptyBufferDone, OmxFillBufferDone };

    if (InitOmxCore(p_this) != VLC_SUCCESS)
        return VLC_EGENERIC;

    int components = CreateComponentsList(p_this, "iv_renderer", ppsz_components);
    if (components <= 0) {
        DeinitOmxCore();
        return VLC_EGENERIC;
    }

    /* Allocate structure */
    vout_display_sys_t *p_sys = (struct vout_display_sys_t*) calloc(1, sizeof(*p_sys));
    if (!p_sys) {
        DeinitOmxCore();
        return VLC_ENOMEM;
    }

    vd->sys = p_sys;
    strcpy(p_sys->psz_component, ppsz_components[0]);

    /* Load component */
    OMX_ERRORTYPE omx_error = pf_get_handle(&p_sys->omx_handle,
                                            p_sys->psz_component, vd, &callbacks);
    CHECK_ERROR(omx_error, "OMX_GetHandle(%s) failed (%x: %s)",
                p_sys->psz_component, omx_error, ErrorToString(omx_error));

    InitOmxEventQueue(&p_sys->event_queue);
    OMX_FIFO_INIT(&p_sys->port.fifo, pOutputPortPrivate);
    p_sys->port.b_direct = false;
    p_sys->port.b_flushed = true;

    OMX_PORT_PARAM_TYPE param;
    OMX_INIT_STRUCTURE(param);
    omx_error = OMX_GetParameter(p_sys->omx_handle, OMX_IndexParamVideoInit, &param);
    CHECK_ERROR(omx_error, "OMX_GetParameter(OMX_IndexParamVideoInit) failed (%x: %s)",
                omx_error, ErrorToString(omx_error));

    p_sys->port.i_port_index = param.nStartPortNumber;
    p_sys->port.b_valid = true;
    p_sys->port.omx_handle = p_sys->omx_handle;

    def = &p_sys->port.definition;
    OMX_INIT_STRUCTURE(*def);
    def->nPortIndex = p_sys->port.i_port_index;
    omx_error = OMX_GetParameter(p_sys->omx_handle, OMX_IndexParamPortDefinition, def);
    CHECK_ERROR(omx_error, "OMX_GetParameter(OMX_IndexParamPortDefinition) failed (%x: %s)",
                omx_error, ErrorToString(omx_error));

#define ALIGN(x, y) (((x) + ((y) - 1)) & ~((y) - 1))
    def->format.video.nFrameWidth = vd->fmt.i_width;
    def->format.video.nFrameHeight = vd->fmt.i_height;
    /* Let the component pick stride/slice height; fix them up below */
    def->format.video.nStride = 0;
    def->format.video.nSliceHeight = 0;
    p_sys->port.definition.format.video.eColorFormat = OMX_COLOR_FormatYUV420PackedPlanar;

    if (!strcmp(p_sys->psz_component, "OMX.broadcom.video_render")) {
        /* Broadcom renderer requires a 16-line-aligned slice height */
        def->format.video.nSliceHeight = ALIGN(def->format.video.nFrameHeight, 16);
    }

    omx_error = OMX_SetParameter(p_sys->omx_handle, OMX_IndexParamPortDefinition,
                                 &p_sys->port.definition);
    CHECK_ERROR(omx_error, "OMX_SetParameter(OMX_IndexParamPortDefinition) failed (%x: %s)",
                omx_error, ErrorToString(omx_error));
    OMX_GetParameter(p_sys->omx_handle, OMX_IndexParamPortDefinition,
                     &p_sys->port.definition);

    /* Some components report zero/short stride; clamp to the frame size */
    if (def->format.video.nStride < (int) def->format.video.nFrameWidth)
        def->format.video.nStride = def->format.video.nFrameWidth;
    if (def->format.video.nSliceHeight < def->format.video.nFrameHeight)
        def->format.video.nSliceHeight = def->format.video.nFrameHeight;

    p_sys->port.pp_buffers =
        malloc(p_sys->port.definition.nBufferCountActual *
               sizeof(OMX_BUFFERHEADERTYPE*));
    /* FIX: allocation result was previously used unchecked */
    if (!p_sys->port.pp_buffers)
        goto error;
    p_sys->port.i_buffers = p_sys->port.definition.nBufferCountActual;

    omx_error = OMX_SendCommand(p_sys->omx_handle, OMX_CommandStateSet,
                                OMX_StateIdle, 0);
    CHECK_ERROR(omx_error, "OMX_CommandStateSet Idle failed (%x: %s)",
                omx_error, ErrorToString(omx_error));

    unsigned int i;
    for (i = 0; i < p_sys->port.i_buffers; i++) {
        omx_error = OMX_AllocateBuffer(p_sys->omx_handle, &p_sys->port.pp_buffers[i],
                                       p_sys->port.i_port_index, 0,
                                       p_sys->port.definition.nBufferSize);
        if (omx_error != OMX_ErrorNone)
            break;
        OMX_FIFO_PUT(&p_sys->port.fifo, p_sys->port.pp_buffers[i]);
    }
    if (omx_error != OMX_ErrorNone) {
        /* Free only the buffers that were actually allocated */
        p_sys->port.i_buffers = i;
        for (i = 0; i < p_sys->port.i_buffers; i++)
            OMX_FreeBuffer(p_sys->omx_handle, p_sys->port.i_port_index,
                           p_sys->port.pp_buffers[i]);
        msg_Err(vd, "OMX_AllocateBuffer failed (%x: %s)",
                omx_error, ErrorToString(omx_error));
        goto error;
    }

    omx_error = WaitForSpecificOmxEvent(&p_sys->event_queue, OMX_EventCmdComplete, 0, 0, 0);
    CHECK_ERROR(omx_error, "Wait for Idle failed (%x: %s)",
                omx_error, ErrorToString(omx_error));

    omx_error = OMX_SendCommand(p_sys->omx_handle, OMX_CommandStateSet,
                                OMX_StateExecuting, 0);
    CHECK_ERROR(omx_error, "OMX_CommandStateSet Executing failed (%x: %s)",
                omx_error, ErrorToString(omx_error));
    omx_error = WaitForSpecificOmxEvent(&p_sys->event_queue, OMX_EventCmdComplete, 0, 0, 0);
    CHECK_ERROR(omx_error, "Wait for Executing failed (%x: %s)",
                omx_error, ErrorToString(omx_error));

    if (!strcmp(p_sys->psz_component, "OMX.broadcom.video_render")) {
        OMX_CONFIG_DISPLAYREGIONTYPE config_display;
        OMX_INIT_STRUCTURE(config_display);
        config_display.nPortIndex = p_sys->port.i_port_index;

        config_display.set = OMX_DISPLAY_SET_SRC_RECT;
        config_display.src_rect.width = vd->cfg->display.width;
        config_display.src_rect.height = vd->cfg->display.height;
        OMX_SetConfig(p_sys->omx_handle, OMX_IndexConfigDisplayRegion, &config_display);
        config_display.set = OMX_DISPLAY_SET_FULLSCREEN;
        config_display.fullscreen = OMX_TRUE;
        OMX_SetConfig(p_sys->omx_handle, OMX_IndexConfigDisplayRegion, &config_display);

        UpdateDisplaySize(vd, vd->cfg);
    }

    /* Setup chroma */
    video_format_t fmt = vd->fmt;
    fmt.i_chroma = VLC_CODEC_I420;
    video_format_FixRgb(&fmt);

    /* Setup vout_display */
    vd->fmt = fmt;
    vd->pool    = Pool;
    vd->display = Display;
    vd->control = Control;
    vd->prepare = NULL;
    vd->manage  = NULL;

    /* Create the associated picture (one per OMX buffer) */
    pictures = calloc(p_sys->port.i_buffers, sizeof(*pictures));
    if (!pictures)
        goto error;
    for (unsigned int j = 0; j < p_sys->port.i_buffers; j++) {
        picture_sys_t *picsys = malloc(sizeof(*picsys));
        if (unlikely(picsys == NULL))
            goto error;
        picsys->sys = p_sys;

        picture_resource_t resource = { .p_sys = picsys };

        picture_t *picture = picture_NewFromResource(&fmt, &resource);
        if (unlikely(picture == NULL))
        {
            free(picsys);
            goto error;
        }
        pictures[j] = picture;
    }

    /* Wrap it into a picture pool */
    picture_pool_configuration_t pool_cfg;
    memset(&pool_cfg, 0, sizeof(pool_cfg));
    pool_cfg.picture_count = p_sys->port.i_buffers;
    pool_cfg.picture       = pictures;
    pool_cfg.lock          = LockSurface;
    pool_cfg.unlock        = UnlockSurface;

    p_sys->pool = picture_pool_NewExtended(&pool_cfg);
    if (!p_sys->pool)
        goto error; /* pictures are released by the error path */

    /* Fix initial state */
    vout_display_SendEventFullscreen(vd, true);

    free(pictures);
    return VLC_SUCCESS;

error:
    if (pictures) {
        /* FIX: release the pictures created so far (calloc zeroed the
         * array, so untouched slots are NULL), not just the array. */
        for (unsigned int j = 0; j < p_sys->port.i_buffers; j++)
            if (pictures[j])
                picture_Release(pictures[j]);
        free(pictures);
    }
    Close(p_this);
    return VLC_EGENERIC;
}
/*****************************************************************************
 * Render: displays previously rendered output
 *****************************************************************************
 * This function send the currently rendered image to Distort image, waits
 * until it is displayed and switch the two rendering buffers, preparing next
 * frame.
 *
 * Visual effect: keeps the input luma, paints cycling chroma gradients on
 * the U/V planes, then blits a scaled-down copy of the input that bounces
 * around the frame while growing and shrinking. Consumes p_pic.
 *****************************************************************************/
static picture_t *Filter( filter_t *p_filter, picture_t *p_pic )
{
    picture_t *p_outpic;
    unsigned int w, h;
    uint8_t u,v;
    picture_t *p_converted;
    video_format_t fmt_out;
    memset( &fmt_out, 0, sizeof(video_format_t) );
    fmt_out.p_palette = NULL;

    if( !p_pic ) return NULL;

    p_outpic = filter_NewPicture( p_filter );
    if( !p_outpic )
    {
        picture_Release( p_pic );
        return NULL;
    }

    /* Image handler is created lazily on the first frame */
    if( !p_filter->p_sys->p_image )
        p_filter->p_sys->p_image = image_HandlerCreate( p_filter );

    /* chrominance: fill each U/V line with the current (u,v) pair, then
     * step (u,v) along the edge of the chroma square to cycle colors */
    u = p_filter->p_sys->u;
    v = p_filter->p_sys->v;
    for( int y = 0; y < p_outpic->p[U_PLANE].i_lines; y++ )
    {
        memset( p_outpic->p[U_PLANE].p_pixels+y*p_outpic->p[U_PLANE].i_pitch,
                u, p_outpic->p[U_PLANE].i_pitch );
        memset( p_outpic->p[V_PLANE].p_pixels+y*p_outpic->p[V_PLANE].i_pitch,
                v, p_outpic->p[V_PLANE].i_pitch );
        if( v == 0 && u != 0 )
            u --;
        else if( u == 0xff )
            v --;
        else if( v == 0xff )
            u ++;
        else if( u == 0 )
            v ++;
    }

    /* luminance */
    plane_CopyPixels( &p_outpic->p[Y_PLANE], &p_pic->p[Y_PLANE] );

    /* image visualization: scale the input by scale/150 */
    fmt_out = p_filter->fmt_out.video;
    fmt_out.i_width = p_filter->fmt_out.video.i_width*p_filter->p_sys->scale/150;
    fmt_out.i_height = p_filter->fmt_out.video.i_height*p_filter->p_sys->scale/150;
    fmt_out.i_visible_width = fmt_out.i_width;
    fmt_out.i_visible_height = fmt_out.i_height;
    p_converted = image_Convert( p_filter->p_sys->p_image, p_pic,
                                 &(p_pic->format), &fmt_out );

    if( p_converted )
    {
        /* Blit the scaled copy at offset (x*b, y*b); the copy is mirrored
         * horizontally/vertically depending on the current motion
         * direction (xinc/yinc). b is 2 for luma, 1 for chroma (subsampled). */
#define copyimage( plane, b ) \
        for( int y = 0; y<p_converted->p[plane].i_visible_lines; y++ ) { \
            for( int x = 0; x<p_converted->p[plane].i_visible_pitch; x++ ) { \
                int nx, ny; \
                if( p_filter->p_sys->yinc == 1 ) \
                    ny= y; \
                else \
                    ny = p_converted->p[plane].i_visible_lines-y; \
                if( p_filter->p_sys->xinc == 1 ) \
                    nx = x; \
                else \
                    nx = p_converted->p[plane].i_visible_pitch-x; \
                p_outpic->p[plane].p_pixels[(p_filter->p_sys->x*b+nx)+(ny+p_filter->p_sys->y*b)*p_outpic->p[plane].i_pitch ] = p_converted->p[plane].p_pixels[y*p_converted->p[plane].i_pitch+x]; \
            } }
        copyimage( Y_PLANE, 2 );
        copyimage( U_PLANE, 1 );
        copyimage( V_PLANE, 1 );
#undef copyimage

        picture_Release( p_converted );
    }
    else
    {
        msg_Err( p_filter, "Image scaling failed miserably." );
    }

    /* Advance position and scale for the next frame, bouncing at edges */
    p_filter->p_sys->x += p_filter->p_sys->xinc;
    p_filter->p_sys->y += p_filter->p_sys->yinc;

    p_filter->p_sys->scale += p_filter->p_sys->scaleinc;
    if( p_filter->p_sys->scale >= 50 ) p_filter->p_sys->scaleinc = -1;
    if( p_filter->p_sys->scale <= 1 ) p_filter->p_sys->scaleinc = 1;

    w = p_filter->fmt_out.video.i_width*p_filter->p_sys->scale/150;
    h = p_filter->fmt_out.video.i_height*p_filter->p_sys->scale/150;
    if( p_filter->p_sys->x*2 + w >= p_filter->fmt_out.video.i_width )
        p_filter->p_sys->xinc = -1;
    if( p_filter->p_sys->x <= 0 )
        p_filter->p_sys->xinc = 1;

    /* Re-center when growth pushed the copy past the right/bottom edge */
    if( p_filter->p_sys->x*2 + w >= p_filter->fmt_out.video.i_width )
        p_filter->p_sys->x = (p_filter->fmt_out.video.i_width-w)/2;
    if( p_filter->p_sys->y*2 + h >= p_filter->fmt_out.video.i_height )
        p_filter->p_sys->y = (p_filter->fmt_out.video.i_height-h)/2;

    if( p_filter->p_sys->y*2 + h >= p_filter->fmt_out.video.i_height )
        p_filter->p_sys->yinc = -1;
    if( p_filter->p_sys->y <= 0 )
        p_filter->p_sys->yinc = 1;

    /* Advance the stored chroma phase by 16 steps per frame */
    for( int y = 0; y < 16; y++ )
    {
        if( p_filter->p_sys->v == 0 && p_filter->p_sys->u != 0 )
            p_filter->p_sys->u -= 1;
        else if( p_filter->p_sys->u == 0xff )
            p_filter->p_sys->v -= 1;
        else if( p_filter->p_sys->v == 0xff )
            p_filter->p_sys->u += 1;
        else if( p_filter->p_sys->u == 0 )
            p_filter->p_sys->v += 1;
    }

    return CopyInfoAndRelease( p_outpic, p_pic );
}
/**
 * Decoder callback invoked to unlink a picture: the decoder handle is not
 * needed, the picture reference is simply released.
 */
static void video_unlink_picture( decoder_t *p_dec, picture_t *p_pic )
{
    (void)p_dec; /* callback signature only */
    picture_Release( p_pic );
}
/**
 * Decoder buffer destructor: drop the picture reference; the decoder
 * argument exists only to satisfy the callback signature.
 */
static void video_del_buffer( decoder_t *p_dec, picture_t *p_pic )
{
    (void)p_dec;
    picture_Release( p_pic );
}
/**
 * Background video-encoder thread.
 *
 * Pops pictures from the shared FIFO (p_sys->pp_pics), feeds them to the
 * video encoder, and appends the resulting blocks to p_sys->p_buffers.
 * Termination is requested via p_sys->b_abort; on exit the thread drains
 * the FIFO and then flushes the encoder by feeding it NULL until it stops
 * producing blocks.
 *
 * \param obj  sout_stream_sys_t pointer (thread argument)
 * \return NULL (thread exit value, unused)
 */
static void* EncoderThread( void *obj )
{
    sout_stream_sys_t *p_sys = (sout_stream_sys_t*)obj;
    sout_stream_id_sys_t *id = p_sys->id_video;
    picture_t *p_pic = NULL;
    int canc = vlc_savecancel (); /* encoding must not be cancelled mid-frame */
    block_t *p_block = NULL;

    for( ;; )
    {
        /* Wait until a picture is available or an abort is requested. */
        vlc_mutex_lock( &p_sys->lock_out );
        while( !p_sys->b_abort &&
               (p_pic = picture_fifo_Pop( p_sys->pp_pics )) == NULL )
            vlc_cond_wait( &p_sys->cond, &p_sys->lock_out );

        /* Abort with an empty FIFO: nothing left to encode in this loop. */
        if( p_sys->b_abort && !p_pic )
        {
            vlc_mutex_unlock( &p_sys->lock_out );
            break;
        }
        vlc_mutex_unlock( &p_sys->lock_out );

        if( p_pic )
        {
            /* Encode outside the lock; only the output chain is protected. */
            p_block = id->p_encoder->pf_encode_video( id->p_encoder, p_pic );

            vlc_mutex_lock( &p_sys->lock_out );
            block_ChainAppend( &p_sys->p_buffers, p_block );

            vlc_mutex_unlock( &p_sys->lock_out );
            picture_Release( p_pic );
        }

        /* Re-check abort after encoding so a pending request is honoured
         * even when pictures keep arriving. */
        vlc_mutex_lock( &p_sys->lock_out );
        if( p_sys->b_abort )
        {
            vlc_mutex_unlock( &p_sys->lock_out );
            break;
        }
        vlc_mutex_unlock( &p_sys->lock_out );
    }

    /* Encode what we have in the buffer on closing */
    vlc_mutex_lock( &p_sys->lock_out );
    while( (p_pic = picture_fifo_Pop( p_sys->pp_pics )) != NULL )
    {
        p_block = id->p_encoder->pf_encode_video( id->p_encoder, p_pic );
        block_ChainAppend( &p_sys->p_buffers, p_block );
        picture_Release( p_pic );
    }

    /* Now flush the encoder: feed NULL until it returns no more blocks. */
    do {
        p_block = id->p_encoder->pf_encode_video(id->p_encoder, NULL );
        block_ChainAppend( &p_sys->p_buffers, p_block );
    } while( p_block );
    vlc_mutex_unlock( &p_sys->lock_out );

    vlc_restorecancel (canc);
    return NULL;
}
/*****************************************************************************
 * Filter: build the mosaic subpicture for a given display date.
 *
 * Walks every elementary stream registered on the global mosaic bridge,
 * discards pictures that are too old (honouring i_delay and BLANK_DELAY),
 * optionally rescales/converts each picture to fit its grid cell, and links
 * the resulting regions into one subpicture.  Returns NULL on allocation
 * failure, or an (possibly empty) subpicture otherwise.
 *
 * Locking: takes p_sys->lock then the global VLC_MOSAIC_MUTEX; both are
 * released on every exit path.
 *****************************************************************************/
static subpicture_t *Filter( filter_t *p_filter, mtime_t date )
{
    filter_sys_t *p_sys = p_filter->p_sys;
    bridge_t *p_bridge;

    int i_real_index, i_row, i_col;
    int i_greatest_real_index_used = p_sys->i_order_length - 1;
    unsigned int col_inner_width, row_inner_height;

    subpicture_region_t *p_region;
    subpicture_region_t *p_region_prev = NULL;

    /* Allocate the subpicture internal data. */
    subpicture_t *p_spu = filter_NewSubpicture( p_filter );
    if( !p_spu )
        return NULL;

    /* Initialize subpicture */
    p_spu->i_channel = 0;
    p_spu->i_start = date;
    p_spu->i_stop = 0;
    p_spu->b_ephemer = true;
    p_spu->i_alpha = p_sys->i_alpha;
    p_spu->b_absolute = false;

    p_spu->i_original_picture_width = p_sys->i_width;
    p_spu->i_original_picture_height = p_sys->i_height;

    vlc_mutex_lock( &p_sys->lock );
    vlc_global_lock( VLC_MOSAIC_MUTEX );

    p_bridge = GetBridge( p_filter );
    if ( p_bridge == NULL )
    {
        /* No bridge yet: return the empty subpicture. */
        vlc_global_unlock( VLC_MOSAIC_MUTEX );
        vlc_mutex_unlock( &p_sys->lock );
        return p_spu;
    }

    if ( p_sys->i_position == position_offsets )
    {
        /* If we have either too much or not enough offsets, fall-back
         * to automatic positioning. */
        if ( p_sys->i_offsets_length != p_sys->i_order_length )
        {
            msg_Err( p_filter,
                     "Number of specified offsets (%d) does not match number "
                     "of input substreams in mosaic-order (%d), falling back "
                     "to mosaic-position=0",
                     p_sys->i_offsets_length, p_sys->i_order_length );
            p_sys->i_position = position_auto;
        }
    }

    if ( p_sys->i_position == position_auto )
    {
        /* Derive a near-square grid large enough for all inputs. */
        int i_numpics = p_sys->i_order_length; /* keep slots and all */
        for( int i_index = 0; i_index < p_bridge->i_es_num; i_index++ )
        {
            bridged_es_t *p_es = p_bridge->pp_es[i_index];
            if ( !p_es->b_empty )
            {
                i_numpics ++;
                if( p_sys->i_order_length && p_es->psz_id != NULL )
                {
                    /* We also want to leave slots for images given in
                     * mosaic-order that are not available in p_vout_picture */
                    for( int i = 0; i < p_sys->i_order_length ; i++ )
                    {
                        if( !strcmp( p_sys->ppsz_order[i], p_es->psz_id ) )
                        {
                            /* Already counted via mosaic-order. */
                            i_numpics--;
                            break;
                        }
                    }
                }
            }
        }
        p_sys->i_rows = ceil(sqrt( (double)i_numpics ));
        p_sys->i_cols = ( i_numpics % p_sys->i_rows == 0 ?
                            i_numpics / p_sys->i_rows :
                            i_numpics / p_sys->i_rows + 1 );
    }

    /* Cell size once the inter-cell borders are subtracted. */
    col_inner_width  = ( ( p_sys->i_width - ( p_sys->i_cols - 1 )
                         * p_sys->i_borderw ) / p_sys->i_cols );
    row_inner_height = ( ( p_sys->i_height - ( p_sys->i_rows - 1 )
                         * p_sys->i_borderh ) / p_sys->i_rows );

    i_real_index = 0;

    for( int i_index = 0; i_index < p_bridge->i_es_num; i_index++ )
    {
        bridged_es_t *p_es = p_bridge->pp_es[i_index];
        video_format_t fmt_in, fmt_out;
        picture_t *p_converted;

        if ( p_es->b_empty )
            continue;

        /* Drop pictures that are older than the display date (minus the
         * configured delay); keep the last one around for up to BLANK_DELAY. */
        while ( p_es->p_picture != NULL
                 && p_es->p_picture->date + p_sys->i_delay < date )
        {
            if ( p_es->p_picture->p_next != NULL )
            {
                picture_t *p_next = p_es->p_picture->p_next;
                picture_Release( p_es->p_picture );
                p_es->p_picture = p_next;
            }
            else if ( p_es->p_picture->date + p_sys->i_delay + BLANK_DELAY <
                        date )
            {
                /* Display blank */
                picture_Release( p_es->p_picture );
                p_es->p_picture = NULL;
                p_es->pp_last = &p_es->p_picture;
                break;
            }
            else
            {
                msg_Dbg( p_filter, "too late picture for %s (%"PRId64 ")",
                         p_es->psz_id,
                         date - p_es->p_picture->date - p_sys->i_delay );
                break;
            }
        }

        if ( p_es->p_picture == NULL )
            continue;

        /* Resolve the slot index: sequential, or looked up in mosaic-order. */
        if ( p_sys->i_order_length == 0 )
        {
            i_real_index++;
        }
        else
        {
            int i;
            for ( i = 0; i <= p_sys->i_order_length; i++ )
            {
                if ( i == p_sys->i_order_length ) break;
                if ( strcmp( p_es->psz_id, p_sys->ppsz_order[i] ) == 0 )
                {
                    i_real_index = i;
                    break;
                }
            }
            /* Unknown id: append after the last explicitly ordered slot. */
            if ( i == p_sys->i_order_length )
                i_real_index = ++i_greatest_real_index_used;
        }
        i_row = ( i_real_index / p_sys->i_cols ) % p_sys->i_rows;
        i_col = i_real_index % p_sys->i_cols ;

        video_format_Init( &fmt_in, 0 );
        video_format_Init( &fmt_out, 0 );

        if ( !p_sys->b_keep )
        {
            /* Convert the images */
            fmt_in.i_chroma = p_es->p_picture->format.i_chroma;
            fmt_in.i_height = p_es->p_picture->format.i_height;
            fmt_in.i_width = p_es->p_picture->format.i_width;

            /* Preserve an alpha plane when the source has one. */
            if( fmt_in.i_chroma == VLC_CODEC_YUVA ||
                fmt_in.i_chroma == VLC_CODEC_RGBA )
                fmt_out.i_chroma = VLC_CODEC_YUVA;
            else
                fmt_out.i_chroma = VLC_CODEC_I420;
            fmt_out.i_width = col_inner_width;
            fmt_out.i_height = row_inner_height;

            if( p_sys->b_ar ) /* keep aspect ratio */
            {
                /* Shrink one dimension so the scaled picture keeps the
                 * source width:height ratio inside the cell. */
                if( (float)fmt_out.i_width / (float)fmt_out.i_height
                      > (float)fmt_in.i_width / (float)fmt_in.i_height )
                {
                    fmt_out.i_width = ( fmt_out.i_height * fmt_in.i_width )
                                         / fmt_in.i_height;
                }
                else
                {
                    fmt_out.i_height = ( fmt_out.i_width * fmt_in.i_height )
                                          / fmt_in.i_width;
                }
            }

            fmt_out.i_visible_width = fmt_out.i_width;
            fmt_out.i_visible_height = fmt_out.i_height;

            p_converted = image_Convert( p_sys->p_image, p_es->p_picture,
                                         &fmt_in, &fmt_out );
            if( !p_converted )
            {
                msg_Warn( p_filter,
                          "image resizing and chroma conversion failed" );
                video_format_Clean( &fmt_in );
                video_format_Clean( &fmt_out );
                continue;
            }
        }
        else
        {
            /* b_keep: use the original picture untouched. */
            p_converted = p_es->p_picture;
            fmt_in.i_width = fmt_out.i_width = p_converted->format.i_width;
            fmt_in.i_height = fmt_out.i_height = p_converted->format.i_height;
            fmt_in.i_chroma = fmt_out.i_chroma = p_converted->format.i_chroma;
            fmt_out.i_visible_width = fmt_out.i_width;
            fmt_out.i_visible_height = fmt_out.i_height;
        }

        p_region = subpicture_region_New( &fmt_out );
        /* FIXME the copy is probably not needed anymore */
        if( p_region )
            picture_Copy( p_region->p_picture, p_converted );
        if( !p_sys->b_keep )
            picture_Release( p_converted );

        if( !p_region )
        {
            video_format_Clean( &fmt_in );
            video_format_Clean( &fmt_out );
            msg_Err( p_filter, "cannot allocate SPU region" );
            subpicture_Delete( p_spu );
            vlc_global_unlock( VLC_MOSAIC_MUTEX );
            vlc_mutex_unlock( &p_sys->lock );
            return NULL;
        }

        /* Position the region: per-ES override, explicit offsets, or grid. */
        if( p_es->i_x >= 0 && p_es->i_y >= 0 )
        {
            p_region->i_x = p_es->i_x;
            p_region->i_y = p_es->i_y;
        }
        else if( p_sys->i_position == position_offsets )
        {
            p_region->i_x = p_sys->pi_x_offsets[i_real_index];
            p_region->i_y = p_sys->pi_y_offsets[i_real_index];
        }
        else
        {
            if( fmt_out.i_width > col_inner_width ||
                p_sys->b_ar || p_sys->b_keep )
            {
                /* we don't have to center the video since it takes the
                whole rectangle area or it's larger than the rectangle */
                p_region->i_x = p_sys->i_xoffset
                            + i_col * ( p_sys->i_width / p_sys->i_cols )
                            + ( i_col * p_sys->i_borderw ) / p_sys->i_cols;
            }
            else
            {
                /* center the video in the dedicated rectangle */
                p_region->i_x = p_sys->i_xoffset
                        + i_col * ( p_sys->i_width / p_sys->i_cols )
                        + ( i_col * p_sys->i_borderw ) / p_sys->i_cols
                        + ( col_inner_width - fmt_out.i_width ) / 2;
            }

            if( fmt_out.i_height > row_inner_height ||
                p_sys->b_ar || p_sys->b_keep )
            {
                /* we don't have to center the video since it takes the
                whole rectangle area or it's taller than the rectangle */
                p_region->i_y = p_sys->i_yoffset
                        + i_row * ( p_sys->i_height / p_sys->i_rows )
                        + ( i_row * p_sys->i_borderh ) / p_sys->i_rows;
            }
            else
            {
                /* center the video in the dedicated rectangle */
                p_region->i_y = p_sys->i_yoffset
                        + i_row * ( p_sys->i_height / p_sys->i_rows )
                        + ( i_row * p_sys->i_borderh ) / p_sys->i_rows
                        + ( row_inner_height - fmt_out.i_height ) / 2;
            }
        }
        p_region->i_align = p_sys->i_align;
        p_region->i_alpha = p_es->i_alpha;

        /* Append the region to the subpicture's singly-linked list. */
        if( p_region_prev == NULL )
        {
            p_spu->p_region = p_region;
        }
        else
        {
            p_region_prev->p_next = p_region;
        }

        video_format_Clean( &fmt_in );
        video_format_Clean( &fmt_out );

        p_region_prev = p_region;
    }

    vlc_global_unlock( VLC_MOSAIC_MUTEX );
    vlc_mutex_unlock( &p_sys->lock );

    return p_spu;
}
/**
 * It creates the pool of picture (only 1).
 *
 * Each picture has an associated offscreen surface in video memory
 * depending on hardware capabilities the picture chroma will be as close
 * as possible to the orginal render chroma to reduce CPU conversion overhead
 * and delegate this work to video card GPU
 *
 * On success *fmt is updated to the chroma/masks actually chosen, and
 * sys->pool / sys->picsys are set.  On failure everything allocated here
 * is released before returning VLC_EGENERIC or VLC_ENOMEM.
 */
static int Direct3DCreatePool(vout_display_t *vd, video_format_t *fmt)
{
    vout_display_sys_t *sys = vd->sys;
    LPDIRECT3DDEVICE9 d3ddev = sys->d3ddev;

    /* Start from the source format and adjust its chroma below. */
    *fmt = vd->source;

    /* Find the appropriate D3DFORMAT for the render chroma, the format will be the closest to
     * the requested chroma which is usable by the hardware in an offscreen surface, as they
     * typically support more formats than textures */
    const d3d_format_t *d3dfmt = Direct3DFindFormat(vd, fmt->i_chroma, sys->d3dpp.BackBufferFormat);
    if (!d3dfmt) {
        msg_Err(vd, "surface pixel format is not supported.");
        return VLC_EGENERIC;
    }
    fmt->i_chroma = d3dfmt->fourcc;
    fmt->i_rmask  = d3dfmt->rmask;
    fmt->i_gmask  = d3dfmt->gmask;
    fmt->i_bmask  = d3dfmt->bmask;

    /* We create one picture.
     * It is useless to create more as we can't be used for direct rendering */

    /* Create a surface */
    LPDIRECT3DSURFACE9 surface;
    HRESULT hr = IDirect3DDevice9_CreateOffscreenPlainSurface(d3ddev,
                                                              fmt->i_width,
                                                              fmt->i_height,
                                                              d3dfmt->format,
                                                              D3DPOOL_DEFAULT,
                                                              &surface,
                                                              NULL);
    if (FAILED(hr)) {
        msg_Err(vd, "Failed to create picture surface. (hr=0x%lx)", hr);
        return VLC_EGENERIC;
    }
    /* fill surface with black color */
    IDirect3DDevice9_ColorFill(d3ddev, surface, NULL, D3DCOLOR_ARGB(0xFF, 0, 0, 0));

    /* Create the associated picture */
    picture_sys_t *picsys = malloc(sizeof(*picsys));
    if (unlikely(picsys == NULL)) {
        IDirect3DSurface9_Release(surface);
        return VLC_ENOMEM;
    }
    picsys->surface = surface;
    picsys->fallback = NULL;

    picture_resource_t resource = { .p_sys = picsys };
    /* Plane 0 is full height; subsequent planes are half height
     * (i.e. assumes 4:2:0-style chroma subsampling for planar formats). */
    for (int i = 0; i < PICTURE_PLANE_MAX; i++)
        resource.p[i].i_lines = fmt->i_height / (i > 0 ? 2 : 1);

    picture_t *picture = picture_NewFromResource(fmt, &resource);
    if (!picture) {
        IDirect3DSurface9_Release(surface);
        free(picsys);
        return VLC_ENOMEM;
    }
    /* picsys ownership passes to the picture; keep a handle in sys. */
    sys->picsys = picsys;

    /* Wrap it into a picture pool */
    picture_pool_configuration_t pool_cfg;
    memset(&pool_cfg, 0, sizeof(pool_cfg));
    pool_cfg.picture_count = 1;
    pool_cfg.picture       = &picture;
    pool_cfg.lock          = Direct3DLockSurface;
    pool_cfg.unlock        = Direct3DUnlockSurface;

    sys->pool = picture_pool_NewExtended(&pool_cfg);
    if (!sys->pool) {
        picture_Release(picture);
        IDirect3DSurface9_Release(surface);
        return VLC_ENOMEM;
    }
    return VLC_SUCCESS;
}
/**
 * Open the Android surface video output display.
 *
 * Loads the native surface library, resolves the output chroma (from the
 * "chroma" config variable, defaulting to RGB32), builds a one-picture
 * pool backed by the Android surface, and wires up the vout_display
 * callbacks.  Returns VLC_SUCCESS, or an error after calling Close().
 */
static int Open(vlc_object_t *p_this)
{
    vout_display_t *vd = (vout_display_t *)p_this;

    video_format_t fmt;
    video_format_ApplyRotation(&fmt, &vd->fmt);

    if (fmt.i_chroma == VLC_CODEC_ANDROID_OPAQUE)
        return VLC_EGENERIC;
    if (vout_display_IsWindowed(vd))
        return VLC_EGENERIC;

    /* Allocate structure */
    vout_display_sys_t *sys = (struct vout_display_sys_t*) calloc(1, sizeof(*sys));
    if (!sys)
        goto error;

    /* NOTE(review): the error label calls Close(p_this) before vd->sys is
     * assigned — presumably Close() tolerates a NULL vd->sys; verify. */
    sys->p_library = InitLibrary(sys);
    if (!sys->p_library) {
        msg_Err(vd, "Could not initialize libandroid.so/libui.so/libgui.so/libsurfaceflinger_client.so!");
        goto error;
    }

    /* Setup chroma */
    char *psz_fcc = var_InheritString(vd, CFG_PREFIX "chroma");
    if( psz_fcc ) {
        fmt.i_chroma = vlc_fourcc_GetCodecFromString(VIDEO_ES, psz_fcc);
        free(psz_fcc);
    } else
        fmt.i_chroma = VLC_CODEC_RGB32;

    switch(fmt.i_chroma) {
        case VLC_CODEC_RGB16:
            fmt.i_bmask = 0x0000001f;
            fmt.i_gmask = 0x000007e0;
            fmt.i_rmask = 0x0000f800;
            break;

        case VLC_CODEC_YV12:
        case VLC_CODEC_I420:
            /* YUV requests are remapped to RGB32, then fall through to
             * pick up the RGB32 masks below. */
            fmt.i_chroma = VLC_CODEC_RGB32;
        case VLC_CODEC_RGB32:
            fmt.i_rmask = 0x000000ff;
            fmt.i_gmask = 0x0000ff00;
            fmt.i_bmask = 0x00ff0000;
            break;

        default:
            /* NOTE(review): returning here skips the error label, leaking
             * sys and sys->p_library — consider goto error instead. */
            return VLC_EGENERIC;
    }
    video_format_FixRgb(&fmt);

    msg_Dbg(vd, "Pixel format %4.4s", (char*)&fmt.i_chroma);

    sys->i_android_hal = ChromaToAndroidHal(fmt.i_chroma);
    if (sys->i_android_hal == -1)
        goto error;

    sys->fmt = fmt;
    UpdateLayout(sys);

    /* Create the associated picture */
    picture_sys_t *picsys = calloc(1, sizeof(picture_sys_t));
    if (unlikely(picsys == NULL))
        goto error;
    picsys->sys = sys;

    picture_resource_t resource = { .p_sys = picsys };
    picture_t *picture = picture_NewFromResource(&fmt, &resource);
    if (!picture) {
        free(picsys);
        goto error;
    }

    /* Wrap it into a picture pool */
    picture_pool_configuration_t pool_cfg;
    memset(&pool_cfg, 0, sizeof(pool_cfg));
    pool_cfg.picture_count = 1;
    pool_cfg.picture       = &picture;
    pool_cfg.lock          = AndroidLockSurface;
    pool_cfg.unlock        = AndroidUnlockSurface;

    sys->pool = picture_pool_NewExtended(&pool_cfg);
    if (!sys->pool) {
        picture_Release(picture);
        goto error;
    }

    /* Setup vout_display */
    vd->sys     = sys;
    vd->fmt     = fmt;
    vd->pool    = Pool;
    vd->display = Display;
    vd->control = Control;
    vd->prepare = NULL;
    vd->manage  = Manage;

    /* Fix initial state */
    vout_display_SendEventFullscreen(vd, false);

    return VLC_SUCCESS;

error:
    Close(p_this);
    return VLC_ENOMEM;
}
/*****************************************************************************
 * Run the filter on a Planar YUV picture
 *
 * Applies brightness/contrast/gamma to the Y plane through a precomputed
 * 256-entry lookup table, then hands the U/V planes to the configured
 * saturation/hue routine.  In threshold mode the luma LUT becomes a hard
 * black/white cut and saturation is forced to 0.
 * Consumes p_pic and returns a new picture (or NULL on failure).
 *****************************************************************************/
static picture_t *FilterPlanar( filter_t *p_filter, picture_t *p_pic )
{
    int pi_luma[256];
    int pi_gamma[256];

    picture_t *p_outpic;
    uint8_t *p_in, *p_in_end, *p_line_end;
    uint8_t *p_out;

    filter_sys_t *p_sys = p_filter->p_sys;

    if( !p_pic ) return NULL;

    p_outpic = filter_NewPicture( p_filter );
    if( !p_outpic )
    {
        picture_Release( p_pic );
        return NULL;
    }

    /* Get variables — snapshot the settings under the lock so the whole
     * frame is processed with one consistent set of parameters. */
    vlc_mutex_lock( &p_sys->lock );
    int32_t i_cont = lroundf( p_sys->f_contrast * 255.f );
    int32_t i_lum = lroundf( (p_sys->f_brightness - 1.f) * 255.f );
    float f_hue = p_sys->f_hue * (float)(M_PI / 180.); /* degrees -> radians */
    int i_sat = (int)( p_sys->f_saturation * 256.f );
    float f_gamma = 1.f / p_sys->f_gamma;
    bool b_thres = p_sys->b_brightness_threshold;
    vlc_mutex_unlock( &p_sys->lock );

    /*
     * Threshold mode drops out everything about luma, contrast and gamma.
     */
    if( !b_thres )
    {
        /* Contrast is a fast but kludged function, so I put this gap to be
         * cleaner :) */
        i_lum += 128 - i_cont / 2;

        /* Fill the gamma lookup table */
        for( unsigned i = 0 ; i < 256 ; i++ )
        {
            pi_gamma[ i ] = clip_uint8_vlc( powf(i / 255.f, f_gamma) * 255.f);
        }

        /* Fill the luma lookup table */
        for( unsigned i = 0 ; i < 256 ; i++ )
        {
            pi_luma[ i ] = pi_gamma[clip_uint8_vlc( i_lum + i_cont * i / 256)];
        }
    }
    else
    {
        /*
         * We get luma as threshold value: the higher it is, the darker is
         * the image. Should I reverse this?
         */
        for( int i = 0 ; i < 256 ; i++ )
        {
            pi_luma[ i ] = (i < i_lum) ? 0 : 255;
        }

        /*
         * Desaturates image to avoid that strange yellow halo...
         */
        i_sat = 0;
    }

    /*
     * Do the Y plane
     */
    p_in = p_pic->p[Y_PLANE].p_pixels;
    /* Stop 8 bytes early so the unrolled loop never reads past the end. */
    p_in_end = p_in + p_pic->p[Y_PLANE].i_visible_lines
                       * p_pic->p[Y_PLANE].i_pitch - 8;

    p_out = p_outpic->p[Y_PLANE].p_pixels;

    for( ; p_in < p_in_end ; )
    {
        p_line_end = p_in + p_pic->p[Y_PLANE].i_visible_pitch - 8;

        for( ; p_in < p_line_end ; )
        {
            /* Do 8 pixels at a time */
            *p_out++ = pi_luma[ *p_in++ ]; *p_out++ = pi_luma[ *p_in++ ];
            *p_out++ = pi_luma[ *p_in++ ]; *p_out++ = pi_luma[ *p_in++ ];
            *p_out++ = pi_luma[ *p_in++ ]; *p_out++ = pi_luma[ *p_in++ ];
            *p_out++ = pi_luma[ *p_in++ ]; *p_out++ = pi_luma[ *p_in++ ];
        }

        /* Handle the 0..7 remaining pixels of the line. */
        p_line_end += 8;

        for( ; p_in < p_line_end ; )
        {
            *p_out++ = pi_luma[ *p_in++ ];
        }

        /* Skip the non-visible padding at the end of each row. */
        p_in += p_pic->p[Y_PLANE].i_pitch
              - p_pic->p[Y_PLANE].i_visible_pitch;
        p_out += p_outpic->p[Y_PLANE].i_pitch
               - p_outpic->p[Y_PLANE].i_visible_pitch;
    }

    /*
     * Do the U and V planes
     */
    /* 8.8 fixed-point rotation coefficients for the hue adjustment. */
    int i_sin = sinf(f_hue) * 256.f;
    int i_cos = cosf(f_hue) * 256.f;

    int i_x = ( cosf(f_hue) + sinf(f_hue) ) * 32768.f;
    int i_y = ( cosf(f_hue) - sinf(f_hue) ) * 32768.f;

    if ( i_sat > 256 )
    {
        /* Currently no errors are implemented in the function, if any are added
         * check them here */
        p_sys->pf_process_sat_hue_clip( p_pic, p_outpic, i_sin, i_cos, i_sat,
                                        i_x, i_y );
    }
    else
    {
        /* Currently no errors are implemented in the function, if any are added
         * check them here */
        p_sys->pf_process_sat_hue( p_pic, p_outpic, i_sin, i_cos, i_sat,
                                   i_x, i_y );
    }

    return CopyInfoAndRelease( p_outpic, p_pic );
}
/* This is the filter function. See Open().
 *
 * Deinterlaces one input picture into one or more output pictures,
 * depending on the active algorithm (framerate doublers emit up to one
 * frame per input field).  Maintains the input-frame and metadata
 * histories that field-based algorithms rely on, computes output PTS from
 * the logged dates, and releases p_pic before returning.  Returns the head
 * of the output picture chain, or NULL on failure/drop.
 *
 * Fix vs. previous revision: removed a stray empty statement ("break;;")
 * after the DEINTERLACE_BOB case. */
picture_t *Deinterlace( filter_t *p_filter, picture_t *p_pic )
{
    filter_sys_t *p_sys = p_filter->p_sys;
    picture_t *p_dst[DEINTERLACE_DST_SIZE];

    /* Request output picture */
    p_dst[0] = filter_NewPicture( p_filter );
    if( p_dst[0] == NULL )
    {
        picture_Release( p_pic );
        return NULL;
    }
    picture_CopyProperties( p_dst[0], p_pic );

    /* Any unused p_dst pointers must be NULL, because they are used to
       check how many output frames we have. */
    for( int i = 1; i < DEINTERLACE_DST_SIZE; ++i )
        p_dst[i] = NULL;

    /* Update the input frame history, if the currently active algorithm
       needs it. */
    if( p_sys->b_use_frame_history )
    {
        /* Duplicate the picture
         * TODO when the vout rework is finished, picture_Hold() might be enough
         * but becarefull, the pitches must match */
        picture_t *p_dup = picture_NewFromFormat( &p_pic->format );
        if( p_dup )
            picture_Copy( p_dup, p_pic );

        /* Slide the history */
        if( p_sys->pp_history[0] )
            picture_Release( p_sys->pp_history[0] );
        for( int i = 1; i < HISTORY_SIZE; i++ )
            p_sys->pp_history[i-1] = p_sys->pp_history[i];
        p_sys->pp_history[HISTORY_SIZE-1] = p_dup;
    }

    /* Slide the metadata history. */
    for( int i = 1; i < METADATA_SIZE; i++ )
    {
        p_sys->meta.pi_date[i-1]            = p_sys->meta.pi_date[i];
        p_sys->meta.pi_nb_fields[i-1]       = p_sys->meta.pi_nb_fields[i];
        p_sys->meta.pb_top_field_first[i-1] = p_sys->meta.pb_top_field_first[i];
    }
    /* The last element corresponds to the current input frame. */
    p_sys->meta.pi_date[METADATA_SIZE-1]            = p_pic->date;
    p_sys->meta.pi_nb_fields[METADATA_SIZE-1]       = p_pic->i_nb_fields;
    p_sys->meta.pb_top_field_first[METADATA_SIZE-1] = p_pic->b_top_field_first;

    /* Remember the frame offset that we should use for this frame.
       The value in p_sys will be updated to reflect the correct value
       for the *next* frame when we call the renderer. */
    int i_frame_offset = p_sys->i_frame_offset;
    int i_meta_idx     = (METADATA_SIZE-1) - i_frame_offset;

    /* These correspond to the current *outgoing* frame. */
    bool b_top_field_first;
    int i_nb_fields;
    if( i_frame_offset != CUSTOM_PTS )
    {
        /* Pick the correct values from the history. */
        b_top_field_first = p_sys->meta.pb_top_field_first[i_meta_idx];
        i_nb_fields       = p_sys->meta.pi_nb_fields[i_meta_idx];
    }
    else
    {
        /* Framerate doublers must not request CUSTOM_PTS, as they need the
           original field timings, and need Deinterlace() to allocate the
           correct number of output frames. */
        assert( !p_sys->b_double_rate );

        /* NOTE: i_nb_fields is only used for framerate doublers, so it is
                 unused in this case. b_top_field_first is only passed to the
                 algorithm. We assume that algorithms that request CUSTOM_PTS
                 will, if necessary, extract the TFF/BFF information themselves.
        */
        b_top_field_first = p_pic->b_top_field_first; /* this is not guaranteed
                                                         to be meaningful */
        i_nb_fields       = p_pic->i_nb_fields;       /* unused */
    }

    /* For framerate doublers, determine field duration and allocate
       output frames. */
    mtime_t i_field_dur = 0;
    int i_double_rate_alloc_end = 0; /* One past last for allocated output
                                        frames in p_dst[]. Used only for
                                        framerate doublers. Will be inited
                                        below. Declared here because the PTS
                                        logic needs the result. */
    if( p_sys->b_double_rate )
    {
        /* Calculate one field duration. */
        int i = 0;
        int iend = METADATA_SIZE-1;
        /* Find oldest valid logged date.
           The current input frame doesn't count. */
        for( ; i < iend; i++ )
            if( p_sys->meta.pi_date[i] > VLC_TS_INVALID )
                break;
        if( i < iend )
        {
            /* Count how many fields the valid history entries
               (except the new frame) represent. */
            int i_fields_total = 0;
            for( int j = i ; j < iend; j++ )
                i_fields_total += p_sys->meta.pi_nb_fields[j];
            /* One field took this long. */
            i_field_dur = (p_pic->date - p_sys->meta.pi_date[i]) / i_fields_total;
        }
        /* Note that we default to field duration 0 if it could not be
           determined. This behaves the same as the old code - leaving the
           extra output frame dates the same as p_pic->date if the last
           cached date was not valid. */

        i_double_rate_alloc_end = i_nb_fields;
        if( i_nb_fields > DEINTERLACE_DST_SIZE )
        {
            /* Note that the effective buffer size depends also on the
               constant private_picture in vout_wrapper.c, since that
               determines the maximum number of output pictures
               filter_NewPicture() will successfully allocate for one input
               frame. */
            msg_Err( p_filter, "Framerate doubler: output buffer too small; "
                               "fields = %d, buffer size = %d. Dropping the "
                               "remaining fields.",
                               i_nb_fields, DEINTERLACE_DST_SIZE );
            i_double_rate_alloc_end = DEINTERLACE_DST_SIZE;
        }

        /* Allocate output frames. */
        for( int i = 1; i < i_double_rate_alloc_end ; ++i )
        {
            p_dst[i-1]->p_next =
            p_dst[i]           = filter_NewPicture( p_filter );
            if( p_dst[i] )
            {
                picture_CopyProperties( p_dst[i], p_pic );
            }
            else
            {
                msg_Err( p_filter, "Framerate doubler: could not allocate "
                                   "output frame %d", i+1 );
                i_double_rate_alloc_end = i; /* Inform the PTS logic about the
                                                correct end position. */
                break; /* If this happens, the rest of the allocations
                          aren't likely to work, either... */
            }
        }
        /* Now we have allocated *up to* the correct number of frames;
           normally, exactly the correct number. Upon alloc failure, we may
           have succeeded in allocating *some* output frames, but fewer than
           were desired. In such a case, as many will be rendered as were
           successfully allocated.

           Note that now p_dst[i] != NULL
           for 0 <= i < i_double_rate_alloc_end. */
    }
    assert( p_sys->b_double_rate  ||  p_dst[1] == NULL );
    assert( i_nb_fields > 2  ||  p_dst[2] == NULL );

    /* Render */
    switch( p_sys->i_mode )
    {
        case DEINTERLACE_DISCARD:
            RenderDiscard( p_dst[0], p_pic, 0 );
            break;

        case DEINTERLACE_BOB:
            RenderBob( p_dst[0], p_pic, !b_top_field_first );
            if( p_dst[1] )
                RenderBob( p_dst[1], p_pic, b_top_field_first );
            if( p_dst[2] )
                RenderBob( p_dst[2], p_pic, !b_top_field_first );
            break; /* was "break;;" — stray empty statement removed */

        case DEINTERLACE_LINEAR:
            RenderLinear( p_filter, p_dst[0], p_pic, !b_top_field_first );
            if( p_dst[1] )
                RenderLinear( p_filter, p_dst[1], p_pic, b_top_field_first );
            if( p_dst[2] )
                RenderLinear( p_filter, p_dst[2], p_pic, !b_top_field_first );
            break;

        case DEINTERLACE_MEAN:
            RenderMean( p_filter, p_dst[0], p_pic );
            break;

        case DEINTERLACE_BLEND:
            RenderBlend( p_filter, p_dst[0], p_pic );
            break;

        case DEINTERLACE_X:
            RenderX( p_dst[0], p_pic );
            break;

        case DEINTERLACE_YADIF:
            if( RenderYadif( p_filter, p_dst[0], p_pic, 0, 0 ) )
                goto drop;
            break;

        case DEINTERLACE_YADIF2X:
            if( RenderYadif( p_filter, p_dst[0], p_pic, 0, !b_top_field_first ) )
                goto drop;
            if( p_dst[1] )
                RenderYadif( p_filter, p_dst[1], p_pic, 1, b_top_field_first );
            if( p_dst[2] )
                RenderYadif( p_filter, p_dst[2], p_pic, 2, !b_top_field_first );
            break;

        case DEINTERLACE_PHOSPHOR:
            if( RenderPhosphor( p_filter, p_dst[0], 0, !b_top_field_first ) )
                goto drop;
            if( p_dst[1] )
                RenderPhosphor( p_filter, p_dst[1], 1, b_top_field_first );
            if( p_dst[2] )
                RenderPhosphor( p_filter, p_dst[2], 2, !b_top_field_first );
            break;

        case DEINTERLACE_IVTC:
            /* Note: RenderIVTC will automatically drop the duplicate frames
               produced by IVTC. This is part of normal operation. */
            if( RenderIVTC( p_filter, p_dst[0] ) )
                goto drop;
            break;
    }

    /* Set output timestamps, if the algorithm didn't request CUSTOM_PTS
       for this frame. */
    assert( i_frame_offset <= METADATA_SIZE || i_frame_offset == CUSTOM_PTS );
    if( i_frame_offset != CUSTOM_PTS )
    {
        mtime_t i_base_pts = p_sys->meta.pi_date[i_meta_idx];

        /* Note: in the usual case (i_frame_offset = 0  and
                 b_double_rate = false), this effectively does nothing.
                 This is needed to correct the timestamp
                 when i_frame_offset > 0. */
        p_dst[0]->date = i_base_pts;

        if( p_sys->b_double_rate )
        {
            /* Processing all actually allocated output frames. */
            for( int i = 1; i < i_double_rate_alloc_end; ++i )
            {
                /* XXX it's not really good especially for the first picture, but
                 * I don't think that delaying by one frame is worth it */
                if( i_base_pts > VLC_TS_INVALID )
                    p_dst[i]->date = i_base_pts + i * i_field_dur;
                else
                    p_dst[i]->date = VLC_TS_INVALID;
            }
        }
    }

    /* All outputs are progressive frames by construction. */
    for( int i = 0; i < DEINTERLACE_DST_SIZE; ++i )
    {
        if( p_dst[i] )
        {
            p_dst[i]->b_progressive = true;
            p_dst[i]->i_nb_fields = 2;
        }
    }

    picture_Release( p_pic );
    return p_dst[0];

drop:
    /* Rendering failed: release everything we allocated plus the input. */
    picture_Release( p_dst[0] );
    for( int i = 1; i < DEINTERLACE_DST_SIZE; ++i )
    {
        if( p_dst[i] )
            picture_Release( p_dst[i] );
    }
    picture_Release( p_pic );
    return NULL;
}
/**
 * It creates multiples pictures from the source one
 *
 * For every active output cell of the wall (i_row x i_col grid), copies the
 * corresponding sub-rectangle of each plane of p_src into that output
 * picture, applying the per-edge black/attenuate border filtering.
 * Consumes p_src.  Returns VLC_EGENERIC if the output pictures could not
 * be allocated, VLC_SUCCESS otherwise.
 */
static int Filter( video_splitter_t *p_splitter,
                   picture_t *pp_dst[], picture_t *p_src )
{
    video_splitter_sys_t *p_sys = p_splitter->p_sys;

    if( video_splitter_NewPicture( p_splitter, pp_dst ) )
    {
        picture_Release( p_src );
        return VLC_EGENERIC;
    }

    for( int y = 0; y < p_sys->i_row; y++ )
    {
        for( int x = 0; x < p_sys->i_col; x++ )
        {
            const panoramix_output_t *p_output = &p_sys->pp_output[x][y];
            if( !p_output->b_active )
                continue;

            /* Output picture assigned to this grid cell */
            picture_t *p_dst = pp_dst[p_output->i_output];

            /* Propagate date/flags from the source picture */
            picture_CopyProperties( p_dst, p_src );

            /* Copy and border-filter every plane of the cell */
            for( int i_plane = 0; i_plane < p_src->i_planes; i_plane++ )
            {
                /* Chroma subsampling divisors for this plane */
                const int i_div_w = p_sys->p_chroma->pi_div_w[i_plane];
                const int i_div_h = p_sys->p_chroma->pi_div_h[i_plane];

                if( !i_div_w || !i_div_h )
                    continue;

                const plane_t *p_srcp = &p_src->p[i_plane];
                const plane_t *p_dstp = &p_dst->p[i_plane];

                /* Scale the border widths down to this plane's resolution */
                panoramix_filter_t filter;
                filter.black.i_right  = p_output->filter.black.i_right / i_div_w;
                filter.black.i_left   = p_output->filter.black.i_left / i_div_w;
                filter.black.i_top    = p_output->filter.black.i_top / i_div_h;
                filter.black.i_bottom = p_output->filter.black.i_bottom / i_div_h;

                filter.attenuate.i_right  = p_output->filter.attenuate.i_right / i_div_w;
                filter.attenuate.i_left   = p_output->filter.attenuate.i_left / i_div_w;
                filter.attenuate.i_top    = p_output->filter.attenuate.i_top / i_div_h;
                filter.attenuate.i_bottom = p_output->filter.attenuate.i_bottom / i_div_h;

                /* Top-left corner of the source rectangle, in plane units */
                const int i_x = p_output->i_src_x/i_div_w;
                const int i_y = p_output->i_src_y/i_div_h;

                assert( p_sys->p_chroma->b_planar );
                FilterPlanar( p_dstp->p_pixels, p_dstp->i_pitch,
                              &p_srcp->p_pixels[i_y * p_srcp->i_pitch + i_x * p_srcp->i_pixel_pitch],
                              p_srcp->i_pitch,
                              p_output->i_src_width/i_div_w,
                              p_output->i_src_height/i_div_h,
                              p_sys->p_chroma->pi_black[i_plane],
                              &filter,
                              p_sys->p_lut[i_plane],
                              p_sys->lambdav[i_plane],
                              p_sys->lambdah[i_plane] );
            }
        }
    }

    picture_Release( p_src );
    return VLC_SUCCESS;
}
/**
 * Decoder buffer-delete callback: releases one reference on the picture.
 * The decoder handle is unused.
 */
static void video_del_buffer_decoder( decoder_t *p_decoder, picture_t *p_pic )
{
    (void)p_decoder; /* not needed to drop a picture reference */
    picture_Release( p_pic );
}
/**
 * sout Send callback: decode incoming blocks into pictures and push them.
 *
 * Blocks for a stream id other than ours are dropped.  Each decoded picture
 * is either rescaled/chroma-converted to the configured output size (when a
 * width and/or height is set, deriving the missing dimension from the input
 * aspect ratio), or duplicated as-is; the result is then run through the
 * optional video filter chain and handed to PushPicture().
 */
static int Send( sout_stream_t *p_stream, sout_stream_id_t *id,
                 block_t *p_buffer )
{
    sout_stream_sys_t *p_sys = p_stream->p_sys;
    picture_t *p_pic;

    /* Only one id is handled by this stream output; ignore the rest. */
    if ( (sout_stream_sys_t *)id != p_sys )
    {
        block_ChainRelease( p_buffer );
        return VLC_SUCCESS;
    }

    while ( (p_pic = p_sys->p_decoder->pf_decode_video( p_sys->p_decoder,
                                                        &p_buffer )) )
    {
        picture_t *p_new_pic;

        if( p_sys->i_height || p_sys->i_width )
        {
            /* Rescale/convert to the requested geometry. */
            video_format_t fmt_out, fmt_in;

            memset( &fmt_in, 0, sizeof(video_format_t) );
            memset( &fmt_out, 0, sizeof(video_format_t) );
            fmt_in = p_sys->p_decoder->fmt_out.video;

            if( p_sys->i_chroma )
                fmt_out.i_chroma = p_sys->i_chroma;
            else
                fmt_out.i_chroma = VLC_CODEC_I420;

            /* Input display aspect, scaled by VOUT_ASPECT_FACTOR.
             * NOTE(review): divides by fmt_in.i_height — presumably the
             * decoder always reports non-zero dimensions here; verify. */
            const unsigned i_fmt_in_aspect =
                (int64_t)VOUT_ASPECT_FACTOR *
                fmt_in.i_sar_num * fmt_in.i_width /
                (fmt_in.i_sar_den * fmt_in.i_height);

            /* Derive the missing dimension from the aspect ratio; force
             * even values with & ~0x1. */
            if ( !p_sys->i_height )
            {
                fmt_out.i_width = p_sys->i_width;
                fmt_out.i_height = (p_sys->i_width * VOUT_ASPECT_FACTOR *
                    p_sys->i_sar_num / p_sys->i_sar_den / i_fmt_in_aspect)
                      & ~0x1;
            }
            else if ( !p_sys->i_width )
            {
                fmt_out.i_height = p_sys->i_height;
                fmt_out.i_width = (p_sys->i_height * i_fmt_in_aspect *
                    p_sys->i_sar_den / p_sys->i_sar_num / VOUT_ASPECT_FACTOR)
                      & ~0x1;
            }
            else
            {
                fmt_out.i_width = p_sys->i_width;
                fmt_out.i_height = p_sys->i_height;
            }
            fmt_out.i_visible_width = fmt_out.i_width;
            fmt_out.i_visible_height = fmt_out.i_height;

            p_new_pic = image_Convert( p_sys->p_image,
                                       p_pic, &fmt_in, &fmt_out );
            if( p_new_pic == NULL )
            {
                msg_Err( p_stream, "image conversion failed" );
                picture_Release( p_pic );
                continue;
            }
        }
        else
        {
            /* TODO: chroma conversion if needed */

            p_new_pic = picture_New( p_pic->format.i_chroma,
                                     p_pic->format.i_width,
                                     p_pic->format.i_height,
                                     p_sys->p_decoder->fmt_out.video.i_sar_num,
                                     p_sys->p_decoder->fmt_out.video.i_sar_den );
            if( !p_new_pic )
            {
                picture_Release( p_pic );
                msg_Err( p_stream, "image allocation failed" );
                continue;
            }

            picture_Copy( p_new_pic, p_pic );
        }
        picture_Release( p_pic );

        /* Optional user video filter chain. */
        if( p_sys->p_vf2 )
            p_new_pic = filter_chain_VideoFilter( p_sys->p_vf2, p_new_pic );

        PushPicture( p_stream, p_new_pic );
    }

    return VLC_SUCCESS;
}
/**
 * Emit one transcoded video frame.
 *
 * Handles frame dropping (input too early for the output clock), optional
 * subtitle overlay, output PTS stamping, and — when master sync is on —
 * duplication of the frame until the output clock catches up with the
 * input PTS.  With i_threads == 0 the frame is encoded inline into *out;
 * otherwise it is queued on the encoder thread's FIFO.  Consumes p_pic.
 */
static void OutputFrame( sout_stream_t *p_stream, picture_t *p_pic,
                         sout_stream_id_sys_t *id, block_t **out )
{
    sout_stream_sys_t *p_sys = p_stream->p_sys;
    picture_t *p_pic2 = NULL;
    const mtime_t original_date = p_pic->date;
    bool b_need_duplicate=false;
    /* If input pts is lower than next_output_pts - output_frame_interval
     * Then the future input frame should fit better and we can drop this one
     *
     * We check it here also because we can have case that video filters outputs multiple
     * pictures but we don't need to use them all, for example yadif2x and outputting to some
     * different fps value
     */
    if( ( original_date ) <
        ( date_Get( &id->next_output_pts ) - (mtime_t)id->i_output_frame_interval ) )
    {
#if 0
        msg_Dbg( p_stream, "dropping frame (%"PRId64" + %"PRId64" vs %"PRId64")",
                 p_pic->date, id->i_input_frame_interval, date_Get(&id->next_output_pts) );
#endif
        picture_Release( p_pic );
        return;
    }

    /*
     * Encoding
     */
    /* Check if we have a subpicture to overlay */
    if( p_sys->p_spu )
    {
        video_format_t fmt = id->p_encoder->fmt_in.video;
        /* Guard against encoders reporting no visible area. */
        if( fmt.i_visible_width <= 0 || fmt.i_visible_height <= 0 )
        {
            fmt.i_visible_width  = fmt.i_width;
            fmt.i_visible_height = fmt.i_height;
            fmt.i_x_offset       = 0;
            fmt.i_y_offset       = 0;
        }

        subpicture_t *p_subpic = spu_Render( p_sys->p_spu, NULL, &fmt, &fmt,
                                             p_pic->date, p_pic->date, false );

        /* Overlay subpicture */
        if( p_subpic )
        {
            if( picture_IsReferenced( p_pic ) &&
                !filter_chain_GetLength( id->p_f_chain ) )
            {
                /* We can't modify the picture, we need to duplicate it,
                 * in this point the picture is already p_encoder->fmt.in format*/
                picture_t *p_tmp = video_new_buffer_encoder( id->p_encoder );
                if( likely( p_tmp ) )
                {
                    picture_Copy( p_tmp, p_pic );
                    picture_Release( p_pic );
                    p_pic = p_tmp;
                }
            }
            if( unlikely( !p_sys->p_spu_blend ) )
                p_sys->p_spu_blend =
                    filter_NewBlend( VLC_OBJECT( p_sys->p_spu ), &fmt );
            if( likely( p_sys->p_spu_blend ) )
                picture_BlendSubpicture( p_pic, p_sys->p_spu_blend, p_subpic );
            subpicture_Delete( p_subpic );
        }
    }

    /* set output pts*/
    p_pic->date = date_Get( &id->next_output_pts );
    /*This pts is handled, increase clock to next one*/
    date_Increment( &id->next_output_pts,
                    id->p_encoder->fmt_in.video.i_frame_rate_base );

    if( p_sys->i_threads == 0 )
    {
        block_t *p_block;

        p_block = id->p_encoder->pf_encode_video( id->p_encoder, p_pic );
        block_ChainAppend( out, p_block );
    }

    /* we need to duplicate while next_output_pts + output_frame_interval < input_pts (next input pts)*/
    b_need_duplicate = ( date_Get( &id->next_output_pts ) +
                         id->i_output_frame_interval ) < ( original_date );

    if( p_sys->i_threads )
    {
        if( p_sys->b_master_sync )
        {
            /* Keep a private copy to duplicate from, since p_pic is handed
             * off to the encoder thread below. */
            p_pic2 = video_new_buffer_encoder( id->p_encoder );
            if( likely( p_pic2 != NULL ) )
                picture_Copy( p_pic2, p_pic );
        }
        vlc_mutex_lock( &p_sys->lock_out );
        picture_fifo_Push( p_sys->pp_pics, p_pic );
        vlc_cond_signal( &p_sys->cond );
        vlc_mutex_unlock( &p_sys->lock_out );
    }

    while( (p_sys->b_master_sync && b_need_duplicate ))
    {
        if( p_sys->i_threads >= 1 )
        {
            picture_t *p_tmp = NULL;
            /* We can't modify the picture, we need to duplicate it */
            p_tmp = video_new_buffer_encoder( id->p_encoder );
            if( likely( p_tmp != NULL ) )
            {
                /* NOTE(review): if the p_pic2 allocation above failed,
                 * p_pic2 is NULL here and this copy would dereference it —
                 * verify video_new_buffer_encoder() failure handling. */
                picture_Copy( p_tmp, p_pic2 );
                p_tmp->date = date_Get( &id->next_output_pts );
                vlc_mutex_lock( &p_sys->lock_out );
                picture_fifo_Push( p_sys->pp_pics, p_tmp );
                vlc_cond_signal( &p_sys->cond );
                vlc_mutex_unlock( &p_sys->lock_out );
            }
        }
        else
        {
            block_t *p_block;
            p_pic->date = date_Get( &id->next_output_pts );
            p_block = id->p_encoder->pf_encode_video(id->p_encoder, p_pic);
            block_ChainAppend( out, p_block );
        }
#if 0
        msg_Dbg( p_stream, "duplicated frame");
#endif
        date_Increment( &id->next_output_pts,
                        id->p_encoder->fmt_in.video.i_frame_rate_base );
        b_need_duplicate = ( date_Get( &id->next_output_pts ) +
                             id->i_output_frame_interval ) < ( original_date );
    }

    /* Release whichever picture this function still owns. */
    if( p_sys->i_threads && p_pic2 )
        picture_Release( p_pic2 );
    else if ( p_sys->i_threads == 0 )
        picture_Release( p_pic );
}
/* Decoder "unlink picture" callback: the decoder no longer references the
 * picture, so simply drop the reference it held. */
static void video_unlink_picture_decoder( decoder_t *p_dec, picture_t *p_picture )
{
    (void)p_dec; /* unused: releasing needs no decoder context */
    picture_Release( p_picture );
}
/* Filter-chain buffer-release callback: the owning filter is irrelevant
 * here; just return the picture to its pool. */
static void transcode_video_filter_buffer_del( filter_t *p_filter, picture_t *p_picture )
{
    (void)p_filter; /* unused */
    picture_Release( p_picture );
}
/****************************************************************************
 * Filter: the whole thing
 ****************************************************************************
 * Crops the input picture by the configured left/right/top/bottom amounts
 * and pads the output with p_padd_color (Y=0x00, U=V=0x80, alpha=0xff),
 * plane by plane.  Crop/pad values are expressed in first-plane pixels and
 * scaled to each plane via its pitch/line ratios.
 * Consumes p_pic; returns the new picture or NULL on allocation failure.
 ****************************************************************************/
static picture_t *Filter( filter_t *p_filter, picture_t *p_pic )
{
    filter_sys_t *p_sys = p_filter->p_sys;
    picture_t *p_outpic;
    int i_plane;
    int i_width, i_height, i_xcrop, i_ycrop,
        i_outwidth, i_outheight, i_xpadd, i_ypadd;

    /* Padding fill value per plane: black in YUV, opaque alpha */
    const int p_padd_color[] = { 0x00, 0x80, 0x80, 0xff };

    if( !p_pic ) return NULL;

    /* Request output picture */
    p_outpic = filter_NewPicture( p_filter );
    if( !p_outpic )
    {
        picture_Release( p_pic );
        return NULL;
    }

    for( i_plane = 0; i_plane < p_pic->i_planes; i_plane++ )
    /* p_pic and p_outpic have the same chroma/number of planes but that's
     * about it. */
    {
        plane_t *p_plane = p_pic->p+i_plane;
        plane_t *p_outplane = p_outpic->p+i_plane;
        uint8_t *p_in = p_plane->p_pixels;
        uint8_t *p_out = p_outplane->p_pixels;
        int i_pixel_pitch = p_plane->i_pixel_pitch;
        /* Planes beyond the table fall back to the luma fill value */
        int i_padd_color = i_plane > 3 ? p_padd_color[0] :
                           p_padd_color[i_plane];

        /* These assignments assume that the first plane always has
         * a width and height equal to the picture's */

        /* Scale crop/pad amounts from first-plane units to this plane's
         * pitch/line resolution (handles chroma subsampling). */
        i_width = ( ( p_filter->fmt_in.video.i_visible_width
                      - p_sys->i_cropleft - p_sys->i_cropright )
                    * p_plane->i_visible_pitch )
                  / p_pic->p->i_visible_pitch;
        i_height = ( ( p_filter->fmt_in.video.i_visible_height
                       - p_sys->i_croptop - p_sys->i_cropbottom )
                     * p_plane->i_visible_lines )
                   / p_pic->p->i_visible_lines;
        i_xcrop = ( p_sys->i_cropleft * p_plane->i_visible_pitch)
                  / p_pic->p->i_visible_pitch;
        i_ycrop = ( p_sys->i_croptop * p_plane->i_visible_lines)
                  / p_pic->p->i_visible_lines;
        i_outwidth = ( p_filter->fmt_out.video.i_visible_width
                       * p_outplane->i_visible_pitch )
                     / p_outpic->p->i_visible_pitch;
        i_outheight = ( p_filter->fmt_out.video.i_visible_height
                        * p_outplane->i_visible_lines )
                      / p_outpic->p->i_visible_lines;
        i_xpadd = ( p_sys->i_paddleft * p_outplane->i_visible_pitch )
                  / p_outpic->p->i_visible_pitch;
        i_ypadd = ( p_sys->i_paddtop * p_outplane->i_visible_lines )
                  / p_outpic->p->i_visible_lines;

        /* Crop the top */
        p_in += i_ycrop * p_plane->i_pitch;
        /* Padd on the top */
        memset( p_out, i_padd_color, i_ypadd * p_outplane->i_pitch );
        p_out += i_ypadd * p_outplane->i_pitch;

        int i_line;
        for( i_line = 0; i_line < i_height; i_line++ )
        {
            /* Remember the next row start before advancing within the row */
            uint8_t *p_in_next = p_in + p_plane->i_pitch;
            uint8_t *p_out_next = p_out + p_outplane->i_pitch;

            /* Crop on the left */
            p_in += i_xcrop * i_pixel_pitch;
            /* Padd on the left */
            memset( p_out, i_padd_color, i_xpadd * i_pixel_pitch );
            p_out += i_xpadd * i_pixel_pitch;
            /* Copy the image and crop on the right */
            memcpy( p_out, p_in, i_width * i_pixel_pitch );
            p_out += i_width * i_pixel_pitch;
            p_in += i_width * i_pixel_pitch;
            /* Padd on the right */
            memset( p_out, i_padd_color,
                    ( i_outwidth - i_xpadd - i_width ) * i_pixel_pitch );

            /* Go to beginning of the next line */
            p_in = p_in_next;
            p_out = p_out_next;
        }

        /* Padd on the bottom */
        memset( p_out, i_padd_color,
                ( i_outheight - i_ypadd - i_height ) * p_outplane->i_pitch );
    }

    return CopyInfoAndRelease( p_outpic, p_pic );
}
/*****************************************************************************
 * transcode_video_process: decode, filter and encode one input block
 *****************************************************************************
 * Decodes every picture held in 'in', rebuilding the filter/encoder chains
 * when the decoder output format changes or the encoder module is not yet
 * open, applies master-sync frame dropping plus input-drift resync, then
 * runs each picture through the implicit and user filter chains and
 * OutputFrame().  A NULL 'in' is a flush request: the encoder is drained
 * inline, or the encoder thread is signalled, joined and its buffers taken.
 *
 * Returns VLC_SUCCESS, or VLC_EGENERIC when the encoder cannot be opened.
 *****************************************************************************/
int transcode_video_process( sout_stream_t *p_stream, sout_stream_id_sys_t *id,
                             block_t *in, block_t **out )
{
    sout_stream_sys_t *p_sys = p_stream->p_sys;
    picture_t *p_pic = NULL;
    *out = NULL;

    /* NULL input == flush */
    if( unlikely( in == NULL ) )
    {
        if( p_sys->i_threads == 0 )
        {
            /* Drain the encoder until it emits nothing more */
            block_t *p_block;
            do {
                p_block = id->p_encoder->pf_encode_video(id->p_encoder, NULL );
                block_ChainAppend( out, p_block );
            } while( p_block );
        }
        else
        {
            /* Ask the encoder thread to abort, wait for it, then collect
             * whatever it buffered. */
            msg_Dbg( p_stream, "Flushing thread and waiting that");
            vlc_mutex_lock( &p_stream->p_sys->lock_out );
            p_stream->p_sys->b_abort = true;
            vlc_cond_signal( &p_stream->p_sys->cond );
            vlc_mutex_unlock( &p_stream->p_sys->lock_out );

            vlc_join( p_stream->p_sys->thread, NULL );
            vlc_mutex_lock( &p_sys->lock_out );
            *out = p_sys->p_buffers;
            p_sys->p_buffers = NULL;
            vlc_mutex_unlock( &p_sys->lock_out );

            msg_Dbg( p_stream, "Flushing done");
        }
        return VLC_SUCCESS;
    }

    while( (p_pic = id->p_decoder->pf_decode_video( id->p_decoder, &in )) )
    {
        /* The decoder output format changed (e.g. sample aspect ratio):
         * tear down and rebuild the filter chains against it. */
        if( unlikely (
              id->p_encoder->p_module &&
              !video_format_IsSimilar( &id->fmt_input_video, &id->p_decoder->fmt_out.video )
            )
          )
        {
            msg_Info( p_stream, "aspect-ratio changed, reiniting. %i -> %i : %i -> %i.",
                        id->fmt_input_video.i_sar_num, id->p_decoder->fmt_out.video.i_sar_num,
                        id->fmt_input_video.i_sar_den, id->p_decoder->fmt_out.video.i_sar_den
                    );
            /* Close filters */
            if( id->p_f_chain )
                filter_chain_Delete( id->p_f_chain );
            id->p_f_chain = NULL;
            if( id->p_uf_chain )
                filter_chain_Delete( id->p_uf_chain );
            id->p_uf_chain = NULL;

            /* Reinitialize filters */
            /* Force even dimensions; SAR reset so it is recomputed */
            id->p_encoder->fmt_out.video.i_visible_width  = p_sys->i_width & ~1;
            id->p_encoder->fmt_out.video.i_visible_height = p_sys->i_height & ~1;
            id->p_encoder->fmt_out.video.i_sar_num = id->p_encoder->fmt_out.video.i_sar_den = 0;

            transcode_video_filter_init( p_stream, id );
            transcode_video_encoder_init( p_stream, id );
            conversion_video_filter_append( id );
            memcpy( &id->fmt_input_video, &id->p_decoder->fmt_out.video, sizeof(video_format_t));
        }

        /* Encoder not opened yet: build chains and open it, seeding the
         * sync clocks from the first picture's date. */
        if( unlikely( !id->p_encoder->p_module ) )
        {
            if( id->p_f_chain )
                filter_chain_Delete( id->p_f_chain );
            if( id->p_uf_chain )
                filter_chain_Delete( id->p_uf_chain );
            id->p_f_chain = id->p_uf_chain = NULL;

            transcode_video_filter_init( p_stream, id );
            transcode_video_encoder_init( p_stream, id );
            conversion_video_filter_append( id );
            memcpy( &id->fmt_input_video, &id->p_decoder->fmt_out.video, sizeof(video_format_t));

            if( transcode_video_encoder_open( p_stream, id ) != VLC_SUCCESS )
            {
                picture_Release( p_pic );
                transcode_video_close( p_stream, id );
                id->b_transcode = false;
                return VLC_EGENERIC;
            }
            date_Set( &id->next_output_pts, p_pic->date );
            date_Set( &id->next_input_pts, p_pic->date );
        }

        /*Input lipsync and drop check */
        if( p_sys->b_master_sync )
        {
            /* If input pts lower than next_output_pts - output_frame_interval
             * Then the future input frame should fit better and we can drop this one
             *
             * We check this here as we don't need to run video filter at all for pictures
             * we are going to drop anyway
             *
             * Duplication need is checked in OutputFrame */
            if( ( p_pic->date ) <
                ( date_Get( &id->next_output_pts ) - (mtime_t)id->i_output_frame_interval ) )
            {
#if 0
                msg_Dbg( p_stream, "dropping frame (%"PRId64" + %"PRId64" vs %"PRId64")",
                         p_pic->date, id->i_input_frame_interval, date_Get(&id->next_output_pts) );
#endif
                picture_Release( p_pic );
                continue;
            }
#if 0
            msg_Dbg( p_stream, "not dropping frame");
#endif
        }

        /* Check input drift regardless, if it's more than 100ms from our approximation, we most likely have lost pictures
         * and are in danger to become out of sync, so better reset timestamps then */
        if( likely( p_pic->date != VLC_TS_INVALID ) )
        {
            mtime_t input_drift = p_pic->date - date_Get( &id->next_input_pts );
            if( unlikely( (input_drift > (CLOCK_FREQ/10)) ||
                          (input_drift < -(CLOCK_FREQ/10))
               ) )
            {
                msg_Warn( p_stream, "Reseting video sync" );
                date_Set( &id->next_output_pts, p_pic->date );
                date_Set( &id->next_input_pts, p_pic->date );
            }
        }
        date_Increment( &id->next_input_pts, id->p_decoder->fmt_out.video.i_frame_rate_base );

        /* Run the filter and output chains; first with the picture,
         * and then with NULL as many times as we need until they
         * stop outputting frames. */
        for ( ;; ) {
            picture_t *p_filtered_pic = p_pic;

            /* Run filter chain */
            if( id->p_f_chain )
                p_filtered_pic = filter_chain_VideoFilter( id->p_f_chain, p_filtered_pic );
            if( !p_filtered_pic )
                break;

            for ( ;; ) {
                picture_t *p_user_filtered_pic = p_filtered_pic;

                /* Run user specified filter chain */
                if( id->p_uf_chain )
                    p_user_filtered_pic = filter_chain_VideoFilter( id->p_uf_chain, p_user_filtered_pic );
                if( !p_user_filtered_pic )
                    break;

                OutputFrame( p_stream, p_user_filtered_pic, id, out );

                /* Subsequent iterations drain the user chain with NULL */
                p_filtered_pic = NULL;
            }

            /* Subsequent iterations drain the implicit chain with NULL */
            p_pic = NULL;
        }
    }

    if( p_sys->i_threads >= 1 )
    {
        /* Pick up any return data the encoder thread wants to output. */
        vlc_mutex_lock( &p_sys->lock_out );
        *out = p_sys->p_buffers;
        p_sys->p_buffers = NULL;
        vlc_mutex_unlock( &p_sys->lock_out );
    }

    return VLC_SUCCESS;
}
/*****************************************************************************
 * Filter: apply the gaussian blur to one picture
 *****************************************************************************
 * Two separable 1-D convolution passes (horizontal into pt_buffer, then
 * vertical into the output plane) using the precomputed distribution table;
 * pt_scale holds per-pixel normalisation factors that compensate for the
 * kernel being truncated near the picture borders.  Both work buffers are
 * lazily allocated on first use and cached in p_sys.
 *
 * FIX: the realloc()/malloc() results were previously used without a NULL
 * check, dereferencing NULL on allocation failure.  On failure we now
 * release both pictures and return NULL, like the filter_NewPicture() path.
 * Consumes p_pic; returns the blurred picture or NULL.
 *****************************************************************************/
static picture_t *Filter( filter_t *p_filter, picture_t *p_pic )
{
    picture_t *p_outpic;
    filter_sys_t *p_sys = p_filter->p_sys;
    int i_plane;
    const int i_dim = p_sys->i_dim;
    type_t *pt_buffer;
    type_t *pt_scale;
    const type_t *pt_distribution = p_sys->pt_distribution;

    if( !p_pic ) return NULL;

    p_outpic = filter_NewPicture( p_filter );
    if( !p_outpic )
    {
        picture_Release( p_pic );
        return NULL;
    }

    if( !p_sys->pt_buffer )
    {
        /* One full Y-plane worth of intermediate values.  The pointer is
         * known to be NULL here, so a plain malloc() replaces the previous
         * realloc(NULL, ...) which behaves identically but read oddly. */
        p_sys->pt_buffer = malloc( p_pic->p[Y_PLANE].i_visible_lines *
                                   p_pic->p[Y_PLANE].i_pitch *
                                   sizeof( type_t ) );
        if( !p_sys->pt_buffer )
        {
            picture_Release( p_outpic );
            picture_Release( p_pic );
            return NULL;
        }
    }

    pt_buffer = p_sys->pt_buffer;
    if( !p_sys->pt_scale )
    {
        const int i_visible_lines = p_pic->p[Y_PLANE].i_visible_lines;
        const int i_visible_pitch = p_pic->p[Y_PLANE].i_visible_pitch;
        const int i_pitch = p_pic->p[Y_PLANE].i_pitch;
        int i_col, i_line;

        p_sys->pt_scale = malloc( i_visible_lines * i_pitch * sizeof( type_t ) );
        if( !p_sys->pt_scale )
        {
            picture_Release( p_outpic );
            picture_Release( p_pic );
            return NULL;
        }
        pt_scale = p_sys->pt_scale;

        /* For each pixel, sum the kernel weights that actually fall inside
         * the picture; this is the divisor used after the two passes. */
        for( i_line = 0 ; i_line < i_visible_lines ; i_line++ )
        {
            for( i_col = 0; i_col < i_visible_pitch ; i_col++ )
            {
                int x, y;
                type_t t_value = 0;

                for( y = __MAX( -i_dim, -i_line );
                     y <= __MIN( i_dim, i_visible_lines - i_line - 1 );
                     y++ )
                {
                    for( x = __MAX( -i_dim, -i_col );
                         x <= __MIN( i_dim, i_visible_pitch - i_col + 1 );
                         x++ )
                    {
                        t_value += pt_distribution[y+i_dim] *
                                   pt_distribution[x+i_dim];
                    }
                }
                pt_scale[i_line*i_pitch+i_col] = t_value;
            }
        }
    }

    pt_scale = p_sys->pt_scale;
    for( i_plane = 0 ; i_plane < p_pic->i_planes ; i_plane++ )
    {
        uint8_t *p_in = p_pic->p[i_plane].p_pixels;
        uint8_t *p_out = p_outpic->p[i_plane].p_pixels;

        const int i_visible_lines = p_pic->p[i_plane].i_visible_lines;
        const int i_visible_pitch = p_pic->p[i_plane].i_visible_pitch;
        const int i_pitch = p_pic->p[i_plane].i_pitch;

        int i_line, i_col;
        /* Subsampling shift of this plane relative to the Y plane; used to
         * reuse the Y-plane-sized buffer/scale tables for chroma planes. */
        const int x_factor = p_pic->p[Y_PLANE].i_visible_pitch/i_visible_pitch-1;
        const int y_factor = p_pic->p[Y_PLANE].i_visible_lines/i_visible_lines-1;

        /* Horizontal pass: convolve each row into pt_buffer */
        for( i_line = 0 ; i_line < i_visible_lines ; i_line++ )
        {
            for( i_col = 0; i_col < i_visible_pitch ; i_col++ )
            {
                type_t t_value = 0;
                int x;
                const int c = i_line*i_pitch+i_col;

                for( x = __MAX( -i_dim, -i_col*(x_factor+1) );
                     x <= __MIN( i_dim, (i_visible_pitch - i_col)*(x_factor+1) + 1 );
                     x++ )
                {
                    t_value += pt_distribution[x+i_dim] *
                               p_in[c+(x>>x_factor)];
                }
                pt_buffer[c] = t_value;
            }
        }

        /* Vertical pass: convolve pt_buffer columns, normalise, write out */
        for( i_line = 0 ; i_line < i_visible_lines ; i_line++ )
        {
            for( i_col = 0; i_col < i_visible_pitch ; i_col++ )
            {
                type_t t_value = 0;
                int y;
                const int c = i_line*i_pitch+i_col;

                for( y = __MAX( -i_dim, (-i_line)*(y_factor+1) );
                     y <= __MIN( i_dim, (i_visible_lines - i_line)*(y_factor+1) - 1 );
                     y++ )
                {
                    t_value += pt_distribution[y+i_dim] *
                               pt_buffer[c+(y>>y_factor)*i_pitch];
                }
                const type_t t_scale = pt_scale[(i_line<<y_factor)*(i_pitch<<x_factor)+(i_col<<x_factor)];
                p_out[c] = (uint8_t)(t_value / t_scale);
                // FIXME wouldn't it be better to round instead of trunc ?
            }
        }
    }

    return CopyInfoAndRelease( p_outpic, p_pic );
}
/*****************************************************************************
 * ImageRead: decode a compressed image into a picture of the wanted format
 *****************************************************************************
 * Decodes p_block with a (possibly cached) decoder, fills the unset fields
 * of *p_fmt_out from the decoded format — deriving width or height from the
 * sample aspect ratio when only one of them is given — and runs a cached
 * conversion filter when chroma or dimensions differ.  *p_fmt_out is
 * updated to the format actually produced.
 *
 * Ownership: consumes p_block; returns a picture, or NULL on failure.
 *****************************************************************************/
static picture_t *ImageRead( image_handler_t *p_image, block_t *p_block,
                             video_format_t *p_fmt_in,
                             video_format_t *p_fmt_out )
{
    picture_t *p_pic = NULL, *p_tmp;

    /* Check if we can reuse the current decoder */
    if( p_image->p_dec &&
        p_image->p_dec->fmt_in.i_codec != p_fmt_in->i_chroma )
    {
        DeleteDecoder( p_image->p_dec );
        p_image->p_dec = 0;
    }

    /* Start a decoder */
    if( !p_image->p_dec )
    {
        p_image->p_dec = CreateDecoder( p_image->p_parent, p_fmt_in );
        if( !p_image->p_dec ) return NULL;
    }

    /* Stamp the block with the current time before decoding */
    p_block->i_pts = p_block->i_dts = mdate();
    /* Keep only the last decoded picture: one block may yield several */
    while( (p_tmp = p_image->p_dec->pf_decode_video( p_image->p_dec, &p_block ))
             != NULL )
    {
        if( p_pic != NULL )
            picture_Release( p_pic );
        p_pic = p_tmp;
    }

    if( p_pic == NULL )
    {
        msg_Warn( p_image->p_parent, "no image decoded" );
        return 0;
    }

    /* Fill unset output fields from the decoded format; when only one
     * dimension is requested, derive the other so the sample aspect ratio
     * is preserved. */
    if( !p_fmt_out->i_chroma )
        p_fmt_out->i_chroma = p_image->p_dec->fmt_out.video.i_chroma;
    if( !p_fmt_out->i_width && p_fmt_out->i_height )
        p_fmt_out->i_width = (int64_t)p_image->p_dec->fmt_out.video.i_width *
                             p_image->p_dec->fmt_out.video.i_sar_num *
                             p_fmt_out->i_height /
                             p_image->p_dec->fmt_out.video.i_height /
                             p_image->p_dec->fmt_out.video.i_sar_den;

    if( !p_fmt_out->i_height && p_fmt_out->i_width )
        p_fmt_out->i_height = (int64_t)p_image->p_dec->fmt_out.video.i_height *
                              p_image->p_dec->fmt_out.video.i_sar_den *
                              p_fmt_out->i_width /
                              p_image->p_dec->fmt_out.video.i_width /
                              p_image->p_dec->fmt_out.video.i_sar_num;

    if( !p_fmt_out->i_width )
        p_fmt_out->i_width = p_image->p_dec->fmt_out.video.i_width;
    if( !p_fmt_out->i_height )
        p_fmt_out->i_height = p_image->p_dec->fmt_out.video.i_height;
    if( !p_fmt_out->i_visible_width )
        p_fmt_out->i_visible_width = p_fmt_out->i_width;
    if( !p_fmt_out->i_visible_height )
        p_fmt_out->i_visible_height = p_fmt_out->i_height;

    /* Check if we need chroma conversion or resizing */
    if( p_image->p_dec->fmt_out.video.i_chroma != p_fmt_out->i_chroma ||
        p_image->p_dec->fmt_out.video.i_width != p_fmt_out->i_width ||
        p_image->p_dec->fmt_out.video.i_height != p_fmt_out->i_height )
    {
        /* A cached filter is only reusable for the same chroma pair */
        if( p_image->p_filter )
        if( p_image->p_filter->fmt_in.video.i_chroma !=
            p_image->p_dec->fmt_out.video.i_chroma ||
            p_image->p_filter->fmt_out.video.i_chroma != p_fmt_out->i_chroma )
        {
            /* We need to restart a new filter */
            DeleteFilter( p_image->p_filter );
            p_image->p_filter = 0;
        }

        /* Start a filter */
        if( !p_image->p_filter )
        {
            p_image->p_filter =
                CreateFilter( p_image->p_parent, &p_image->p_dec->fmt_out,
                              p_fmt_out, NULL );

            if( !p_image->p_filter )
            {
                picture_Release( p_pic );
                return NULL;
            }
        }
        else
        {
            /* Filters should handle on-the-fly size changes */
            /* NOTE(review): plain es_format_t assignment shallow-copies any
             * internal pointers; presumably safe for these formats — verify
             * against es_format_Copy() semantics. */
            p_image->p_filter->fmt_in = p_image->p_dec->fmt_out;
            p_image->p_filter->fmt_out = p_image->p_dec->fmt_out;
            p_image->p_filter->fmt_out.i_codec = p_fmt_out->i_chroma;
            p_image->p_filter->fmt_out.video = *p_fmt_out;
        }

        p_pic = p_image->p_filter->pf_video_filter( p_image->p_filter, p_pic );
        *p_fmt_out = p_image->p_filter->fmt_out.video;
    }
    else *p_fmt_out = p_image->p_dec->fmt_out.video;

    return p_pic;
}