Example #1
int
ndga_isvisible(ndga_t sp)
{
	if (!sp) {
		/*
		 * Safest to say yes?
		 */
		return (1);
	}

	if (!sp->busy && sp->is_changed) {
		ndga_handle_change(sp);
		sp->is_changed = 0;
	}

	if (!sp->busy && sp->new_clip) {
		update_clip(sp);
	}

#ifdef X_VISIBILITY_BUG
	ndga_workaround(sp);
#endif /* X_VISIBILITY_BUG */

	DBG(fprintf(stderr, "is_visible %d\n",  sp->is_open && sp->is_visible));
	return (sp->is_open && sp->is_visible);
}
Example #2
void
ndga_done(struct ndga_state * sp)
{
	if (!sp)
		return;

	if (sp->is_changed) {
		ndga_handle_change(sp);
		sp->is_changed = 0;
	}

	if (sp->new_clip) {
		update_clip(sp);
	}

	sp->busy = 0;

#ifdef X_VISIBILITY_BUG
	ndga_workaround(sp);
#endif /* X_VISIBILITY_BUG */
}
Example #3
void
ndga_process_event(struct ndga_state * sp, XEvent * event)
{
	NRectangle	rectangle;

	if (!sp)
		return;

	if (event->xany.type == Expose) {
#ifdef X_VISIBILITY_BUG
		if (sp->clearOnExpose) {
			NDestroyRegion(sp->region);
			sp->region = NCreateRegion();
			sp->clearOnExpose = 0;
		}
#endif /* X_VISIBILITY_BUG */
		rectangle.x = event->xexpose.x;
		rectangle.y = event->xexpose.y;
		rectangle.width = event->xexpose.width;
		rectangle.height = event->xexpose.height;
		NUnionRectWithRegion(&rectangle, sp->region, sp->region);

		DBG(fprintf(stderr, "Expose: %d,%d %dx%d\n",
			event->xexpose.x,
			event->xexpose.y,
			event->xexpose.width,
			event->xexpose.height));
		if (event->xexpose.count == 0) {
			/*
			 * Handle the last in a sequence of expose events.
			 */
			sp->is_changed = 1;
		}
	} else if (event->xany.type == VisibilityNotify) {
		sp->is_visible =
		    (event->xvisibility.state != VisibilityFullyObscured);
		sp->is_unobscured =
		    (event->xvisibility.state == VisibilityUnobscured);
		sp->is_changed = 1;
		DBG(fprintf(stderr, "Visiblility: %s\n",
			(event->xvisibility.state == VisibilityUnobscured) ?
			"Unobscured" :
			((event->xvisibility.state == VisibilityFullyObscured) ?
			    "FullyObscured" : "PartuallyObscured")));
		if (event->xvisibility.state == VisibilityPartiallyObscured) {
			Window		w;
			DBG(fprintf(stderr, "Request full clip\n"));

			NDestroyRegion(sp->region);
			sp->region = NCreateRegion();

			w = XCreateSimpleWindow(sp->display, sp->window,
			    0, 0, 30000, 30000, 0, 0, 0);
			XMapWindow(sp->display, w);
			XFlush(sp->display);
			XDestroyWindow(sp->display, w);
			XFlush(sp->display);
			/*
			 * sync so we will get expose events all together
			 */ 
			XSync(sp->display, False);
		}
	} else if (event->xany.type == UnmapNotify) {
		DBG(fprintf(stderr, "Unmap\n"));
		sp->is_changed = 1;
		NDestroyRegion(sp->region);
		sp->region = NCreateRegion();
	} else if (event->xany.type == ConfigureNotify) {
		DBG(fprintf(stderr, "Configure\n"));
		sp->is_changed = 1;
	} else if (event->xany.type == CirculateNotify) {
		DBG(fprintf(stderr, "Circulate\n"));
	} else if (event->xany.type == MapNotify) {
		DBG(fprintf(stderr, "Map\n"));
	} else {
		DBG(fprintf(stderr, "Event %d\n", event->xany.type));
	}

	if (XPending(sp->display) != 0)
		return;
	DBG(fprintf(stderr, "Last Event\n\n"));

	if ((sp->on_open || sp->on_close) && sp->is_changed) {
		if (sp->is_open != sp->was_open) {
			if (!sp->is_open) {
				DBG(fprintf(stderr, "I-CLOSED\n"));
				if (sp->on_close)
					(*sp->on_close) ();
			} else {
				DBG(fprintf(stderr, "I-OPEN\n"));
				if (sp->on_open)
					(*sp->on_open) ();
			}

			sp->was_open = sp->is_open;
		}
	}

	if (sp->is_changed) {
		ndga_handle_change(sp);
		if (!sp->busy)
			sp->is_changed = 0;
	}

	if (!sp->busy && sp->new_clip) {
		update_clip(sp);
	}
}
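
The function above only finalizes its bookkeeping once XPending() reports an empty queue, so it is intended to be called once per event from the application's own dispatch loop. A minimal sketch of such a driver loop follows; the way the display handle and sp are obtained is an assumption, not part of the original example.

void
pump_ndga_events(struct ndga_state * sp, Display * display)
{
	XEvent		event;

	/* Drain the queue and hand every event to ndga_process_event(). */
	while (XPending(display) > 0) {
		XNextEvent(display, &event);
		ndga_process_event(sp, &event);
	}
}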
Example #4
/*
 * Attempt to start direct I/O; returns 1 if visible and sets the
 * window's location in xp, yp, wp, hp. is_unobscured is set if all
 * parts of the window are visible. clipEvent contains an event
 * counter for changes in the clip list. When a caller observes
 * a change in the counter, it should obtain the new clip.
 * (A hypothetical usage sketch follows this function.)
 *
 * Returns 0 if the window is not visible.
 */
int
ndga_start(struct ndga_state * sp,
    int *xp,			/* x */
    int *yp,			/* y */
    int *wp,			/* width */
    int *hp,			/* height */
    int *is_unobscured,		/* 1 if unobscured, 0 if clipped */
    int *clipEvent		/* clip event count */)
{
	char	       *loc;

	if (!sp)
		return (0);

	if (sp->is_changed) {
		ndga_handle_change(sp);
		sp->is_changed = 0;
	}

	if (sp->new_clip) {
		update_clip(sp);
	}

	/*
	 * If fully visible or if partially visible and the client is
	 * expecting to handle clipping, allow it to go through
	 */
	if (sp->is_open && sp->is_visible && 
	    (sp->is_unobscured || 
	    (clipEvent != NULL && is_unobscured != NULL))) {
		if (sp->session && sp->sent_clip == 0) {
			nvSessionClipRegion(sp->session, sp->clip);
			sp->sent_clip = 1;
		}

		*xp = sp->visRect.x;
		*wp = sp->visRect.width;

		*yp = sp->visRect.y;
		*hp = sp->visRect.height;

		sp->busy = 1;

		if (clipEvent)
			*clipEvent = sp->clipEvent;

		if (is_unobscured)
			*is_unobscured = sp->is_unobscured;

		DBG(fprintf(stderr, "ndga_start() = 1\n"));
		return (1);
	}

	if (sp->is_open && sp->is_visible) {
		/*
		 * Assume it will just go through X; slow it down
		 */
		poll(0, 0, sp->w * sp->h / 450);
	}

	/*
	 * not OK
	 */
	if (sp->session && sp->sent_clip) {
		sp->sent_clip = 0;
		nvSessionClipRegion(sp->session, NULL);
	}

	if (clipEvent)
		*clipEvent = sp->clipEvent;

	if (is_unobscured)
		*is_unobscured = 0;

	return (0);
}
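
A hypothetical caller of the ndga_start()/ndga_done() pair might look like the sketch below. The drawing routine, the cached clip-event counter, and the clip re-read step are illustrative assumptions; only the calling convention itself comes from the comment and code above.

static int	last_clip_event = -1;	/* hypothetical cached counter */

void
render_frame(struct ndga_state * sp)
{
	int		x, y, w, h;
	int		unobscured, clip_event;

	if (!ndga_start(sp, &x, &y, &w, &h, &unobscured, &clip_event))
		return;		/* window not visible; nothing to draw */

	if (clip_event != last_clip_event) {
		/* the clip list changed; the caller re-reads it here */
		last_clip_event = clip_event;
	}

	draw_rect_directly(x, y, w, h, unobscured);	/* hypothetical drawing routine */
	ndga_done(sp);
}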
Example #5
static LRESULT CALLBACK wndproc(HWND hwnd, UINT msg, WPARAM wparam, LPARAM lparam)
{
    int x, y, w, h;
    msg_t m;

    x = GET_X_LPARAM(lparam);
    y = GET_Y_LPARAM(lparam);
    w = LOWORD(lparam);
    h = HIWORD(lparam);

    switch (msg) {
    case WM_CLOSE:
        PostQuitMessage(0);
        break;
    case WM_SIZE:
        if (init_done)
            do_resize(w, h);
        break;
    case WM_MOVE:
        break;
    case WM_EXITSIZEMOVE:
        break;
    case WM_MOUSELEAVE:
        tracking = 0;
        break;
    case WM_MOUSEMOVE:
        if (clipcursor)
            update_clip(hwnd); // TODO: shouldn't be here

        if (!tracking) {
            TrackMouseEvent(&tme);
            tracking = 1;
        }
        input_mmove(&input, x, y, 0);
        break;
    case WM_LBUTTONDOWN:
        input_mdown(&input, x, y, 0, 0);
        break;
    case WM_LBUTTONUP:
        input_mup(&input, x, y, 0, 0);
        break;
    case WM_RBUTTONDOWN:
        input_mdown(&input, x, y, 0, 2);
        break;
    case WM_RBUTTONUP:
        input_mup(&input, x, y, 0, 2);
        break;
    case WM_MBUTTONDOWN:
        input_mdown(&input, x, y, 0, 1);
        break;
    case WM_MBUTTONUP:
        input_mup(&input, x, y, 0, 1);
        break;
    case WM_MOUSEWHEEL:
        input_mwheel(&input, (double) (int16_t)HIWORD(wparam) / 120.0);
        break;
    case WM_KEYDOWN:
        if (wparam == VK_F1) {
            if (!clipcursor)
                update_clip(hwnd);
            else
                ClipCursor(0);
            clipcursor = !clipcursor;
            break;
        }
        input_keydown(&input, wparam, wparam);
        break;
    case WM_KEYUP:
        input_keyup(&input, wparam);
        break;
    case WM_USER:
        m.v = wparam;
        do_msg(m.id, m.v8, m.v16, m.value, (void*)lparam);
        break;
    default:
        return DefWindowProc(hwnd, msg, wparam, lparam);
    }

    return 0;
}
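
The WM_MOUSEMOVE handler above re-arms mouse-leave tracking through a TRACKMOUSEEVENT structure (tme) whose setup is not shown in this example. A sketch of the typical initialization, assuming tme and tracking are the module-level globals referenced by wndproc(), is:

/* Globals assumed to be the ones used by wndproc() above. */
static TRACKMOUSEEVENT tme;
static int tracking;

static void init_mouse_tracking(HWND hwnd)
{
    tme.cbSize = sizeof(TRACKMOUSEEVENT);
    tme.dwFlags = TME_LEAVE;    /* request a one-shot WM_MOUSELEAVE */
    tme.hwndTrack = hwnd;       /* window whose client area is tracked */
    tracking = 0;               /* wndproc() re-arms tracking on each WM_MOUSEMOVE */
}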
Example #6
File: avs.c Project: xing2fan/x264
static int open_file( char *psz_filename, hnd_t *p_handle, video_info_t *info, cli_input_opt_t *opt )
{
    FILE *fh = x264_fopen( psz_filename, "r" );
    if( !fh )
        return -1;
    int b_regular = x264_is_regular_file( fh );
    fclose( fh );
    FAIL_IF_ERROR( !b_regular, "AVS input is incompatible with non-regular file `%s'\n", psz_filename );

    avs_hnd_t *h = calloc( 1, sizeof(avs_hnd_t) );
    if( !h )
        return -1;
    FAIL_IF_ERROR( custom_avs_load_library( h ), "failed to load avisynth\n" );
    h->env = h->func.avs_create_script_environment( AVS_INTERFACE_25 );
    if( h->func.avs_get_error )
    {
        const char *error = h->func.avs_get_error( h->env );
        FAIL_IF_ERROR( error, "%s\n", error );
    }
    float avs_version = get_avs_version( h );
    if( avs_version <= 0 )
        return -1;
    x264_cli_log( "avs", X264_LOG_DEBUG, "using avisynth version %.2f\n", avs_version );

#ifdef _WIN32
    /* Avisynth doesn't support Unicode filenames. */
    char ansi_filename[MAX_PATH];
    FAIL_IF_ERROR( !x264_ansi_filename( psz_filename, ansi_filename, MAX_PATH, 0 ), "invalid ansi filename\n" );
    AVS_Value arg = avs_new_value_string( ansi_filename );
#else
    AVS_Value arg = avs_new_value_string( psz_filename );
#endif

    AVS_Value res;
    char *filename_ext = get_filename_extension( psz_filename );

    if( !strcasecmp( filename_ext, "avs" ) )
    {
        res = h->func.avs_invoke( h->env, "Import", arg, NULL );
        FAIL_IF_ERROR( avs_is_error( res ), "%s\n", avs_as_error( res ) );
        /* check if the user is using a multi-threaded script and apply distributor if necessary.
           adapted from avisynth's vfw interface */
        AVS_Value mt_test = h->func.avs_invoke( h->env, "GetMTMode", avs_new_value_bool( 0 ), NULL );
        int mt_mode = avs_is_int( mt_test ) ? avs_as_int( mt_test ) : 0;
        h->func.avs_release_value( mt_test );
        if( mt_mode > 0 && mt_mode < 5 )
        {
            AVS_Value temp = h->func.avs_invoke( h->env, "Distributor", res, NULL );
            h->func.avs_release_value( res );
            res = temp;
        }
    }
    else /* non script file */
    {
        /* cycle through known source filters to find one that works */
        const char *filter[AVS_MAX_SEQUENCE+1] = { 0 };
        avs_build_filter_sequence( filename_ext, filter );
        int i;
        for( i = 0; filter[i]; i++ )
        {
            x264_cli_log( "avs", X264_LOG_INFO, "trying %s... ", filter[i] );
            if( !h->func.avs_function_exists( h->env, filter[i] ) )
            {
                x264_cli_printf( X264_LOG_INFO, "not found\n" );
                continue;
            }
            if( !strncasecmp( filter[i], "FFmpegSource", 12 ) )
            {
                x264_cli_printf( X264_LOG_INFO, "indexing... " );
                fflush( stderr );
            }
            res = h->func.avs_invoke( h->env, filter[i], arg, NULL );
            if( !avs_is_error( res ) )
            {
                x264_cli_printf( X264_LOG_INFO, "succeeded\n" );
                break;
            }
            x264_cli_printf( X264_LOG_INFO, "failed\n" );
        }
        FAIL_IF_ERROR( !filter[i], "unable to find source filter to open `%s'\n", psz_filename );
    }
    FAIL_IF_ERROR( !avs_is_clip( res ), "`%s' didn't return a video clip\n", psz_filename );
    h->clip = h->func.avs_take_clip( res, h->env );
    const AVS_VideoInfo *vi = h->func.avs_get_video_info( h->clip );
    FAIL_IF_ERROR( !avs_has_video( vi ), "`%s' has no video data\n", psz_filename );
    /* if the clip is made of fields instead of frames, call weave to make them frames */
    if( avs_is_field_based( vi ) )
    {
        x264_cli_log( "avs", X264_LOG_WARNING, "detected fieldbased (separated) input, weaving to frames\n" );
        AVS_Value tmp = h->func.avs_invoke( h->env, "Weave", res, NULL );
        FAIL_IF_ERROR( avs_is_error( tmp ), "couldn't weave fields into frames: %s\n", avs_as_error( tmp ) );
        res = update_clip( h, &vi, tmp, res );
        info->interlaced = 1;
        info->tff = avs_is_tff( vi );
    }
#if !HAVE_SWSCALE
    /* if swscale is not available, convert the CSP if necessary */
    FAIL_IF_ERROR( avs_version < 2.6f && (opt->output_csp == X264_CSP_I400 || opt->output_csp == X264_CSP_I422 || opt->output_csp == X264_CSP_I444),
                   "avisynth >= 2.6 is required for i400/i422/i444 output\n" );
    if( (opt->output_csp == X264_CSP_I400 && !AVS_IS_Y( vi )) ||
        (opt->output_csp == X264_CSP_I420 && !AVS_IS_420( vi )) ||
        (opt->output_csp == X264_CSP_I422 && !AVS_IS_422( vi )) ||
        (opt->output_csp == X264_CSP_I444 && !AVS_IS_444( vi )) ||
        (opt->output_csp == X264_CSP_RGB && !avs_is_rgb( vi )) )
    {
        const char *csp;
        if( AVS_IS_AVISYNTHPLUS )
        {
            csp = opt->output_csp == X264_CSP_I400 ? "Y" :
                  opt->output_csp == X264_CSP_I420 ? "YUV420" :
                  opt->output_csp == X264_CSP_I422 ? "YUV422" :
                  opt->output_csp == X264_CSP_I444 ? "YUV444" :
                  "RGB";
        }
        else
        {
            csp = opt->output_csp == X264_CSP_I400 ? "Y8" :
                  opt->output_csp == X264_CSP_I420 ? "YV12" :
                  opt->output_csp == X264_CSP_I422 ? "YV16" :
                  opt->output_csp == X264_CSP_I444 ? "YV24" :
                  "RGB";
        }
        x264_cli_log( "avs", X264_LOG_WARNING, "converting input clip to %s\n", csp );
        if( opt->output_csp != X264_CSP_I400 )
        {
            FAIL_IF_ERROR( opt->output_csp < X264_CSP_I444 && (vi->width&1),
                           "input clip width not divisible by 2 (%dx%d)\n", vi->width, vi->height );
            FAIL_IF_ERROR( opt->output_csp == X264_CSP_I420 && info->interlaced && (vi->height&3),
                           "input clip height not divisible by 4 (%dx%d)\n", vi->width, vi->height );
            FAIL_IF_ERROR( (opt->output_csp == X264_CSP_I420 || info->interlaced) && (vi->height&1),
                           "input clip height not divisible by 2 (%dx%d)\n", vi->width, vi->height );
        }
        char conv_func[16];
        snprintf( conv_func, sizeof(conv_func), "ConvertTo%s", csp );
        AVS_Value arg_arr[3];
        const char *arg_name[3];
        int arg_count = 1;
        arg_arr[0] = res;
        arg_name[0] = NULL;
        if( opt->output_csp != X264_CSP_I400 )
        {
            arg_arr[arg_count] = avs_new_value_bool( info->interlaced );
            arg_name[arg_count] = "interlaced";
            arg_count++;
        }
        /* if doing a rgb <-> yuv conversion then range is handled via 'matrix'. though it's only supported in 2.56+ */
        char matrix[7];
        if( avs_version >= 2.56f && ((opt->output_csp == X264_CSP_RGB && avs_is_yuv( vi )) || (opt->output_csp != X264_CSP_RGB && avs_is_rgb( vi ))) )
        {
            // if converting from yuv, then we specify the matrix for the input, otherwise use the output's.
            int use_pc_matrix = avs_is_yuv( vi ) ? opt->input_range == RANGE_PC : opt->output_range == RANGE_PC;
            snprintf( matrix, sizeof(matrix), "%s601", use_pc_matrix ? "PC." : "Rec" ); /* FIXME: use correct coefficients */
            arg_arr[arg_count] = avs_new_value_string( matrix );
            arg_name[arg_count] = "matrix";
            arg_count++;
            // notification that the input range has changed to the desired one
            opt->input_range = opt->output_range;
        }
        AVS_Value res2 = h->func.avs_invoke( h->env, conv_func, avs_new_value_array( arg_arr, arg_count ), arg_name );
        FAIL_IF_ERROR( avs_is_error( res2 ), "couldn't convert input clip to %s: %s\n", csp, avs_as_error( res2 ) );
        res = update_clip( h, &vi, res2, res );
    }
    /* if swscale is not available, change the range if necessary. This only applies to YUV-based CSPs however */
    if( avs_is_yuv( vi ) && opt->output_range != RANGE_AUTO && ((opt->input_range == RANGE_PC) != opt->output_range) )
    {
        const char *levels = opt->output_range ? "TV->PC" : "PC->TV";
        x264_cli_log( "avs", X264_LOG_WARNING, "performing %s conversion\n", levels );
        AVS_Value arg_arr[2];
        arg_arr[0] = res;
        arg_arr[1] = avs_new_value_string( levels );
        const char *arg_name[] = { NULL, "levels" };
        AVS_Value res2 = h->func.avs_invoke( h->env, "ColorYUV", avs_new_value_array( arg_arr, 2 ), arg_name );
        FAIL_IF_ERROR( avs_is_error( res2 ), "couldn't convert range: %s\n", avs_as_error( res2 ) );
        res = update_clip( h, &vi, res2, res );
        // notification that the input range has changed to the desired one
        opt->input_range = opt->output_range;
    }
#endif

    h->func.avs_release_value( res );

    info->width   = vi->width;
    info->height  = vi->height;
    info->fps_num = vi->fps_numerator;
    info->fps_den = vi->fps_denominator;
    h->num_frames = info->num_frames = vi->num_frames;
    info->thread_safe = 1;
    if( AVS_IS_RGB64( vi ) )
        info->csp = X264_CSP_BGRA | X264_CSP_VFLIP | X264_CSP_HIGH_DEPTH;
    else if( avs_is_rgb32( vi ) )
        info->csp = X264_CSP_BGRA | X264_CSP_VFLIP;
    else if( AVS_IS_RGB48( vi ) )
        info->csp = X264_CSP_BGR | X264_CSP_VFLIP | X264_CSP_HIGH_DEPTH;
    else if( avs_is_rgb24( vi ) )
        info->csp = X264_CSP_BGR | X264_CSP_VFLIP;
    else if( AVS_IS_YUV444P16( vi ) )
        info->csp = X264_CSP_I444 | X264_CSP_HIGH_DEPTH;
    else if( avs_is_yv24( vi ) )
        info->csp = X264_CSP_I444;
    else if( AVS_IS_YUV422P16( vi ) )
        info->csp = X264_CSP_I422 | X264_CSP_HIGH_DEPTH;
    else if( avs_is_yv16( vi ) )
        info->csp = X264_CSP_I422;
    else if( AVS_IS_YUV420P16( vi ) )
        info->csp = X264_CSP_I420 | X264_CSP_HIGH_DEPTH;
    else if( avs_is_yv12( vi ) )
        info->csp = X264_CSP_I420;
    else if( AVS_IS_Y16( vi ) )
        info->csp = X264_CSP_I400 | X264_CSP_HIGH_DEPTH;
    else if( avs_is_y8( vi ) )
        info->csp = X264_CSP_I400;
    else if( avs_is_yuy2( vi ) )
        info->csp = X264_CSP_YUYV;
#if HAVE_SWSCALE
    else if( avs_is_yv411( vi ) )
        info->csp = AV_PIX_FMT_YUV411P | X264_CSP_OTHER;
#endif
    else
    {
        AVS_Value pixel_type = h->func.avs_invoke( h->env, "PixelType", res, NULL );
        const char *pixel_type_name = avs_is_string( pixel_type ) ? avs_as_string( pixel_type ) : "unknown";
        FAIL_IF_ERROR( 1, "not supported pixel type: %s\n", pixel_type_name );
    }
    info->vfr = 0;

    *p_handle = h;
    return 0;
}
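
Each example on this page routes new clips through update_clip(), which the listings do not show. Based on the call sites above (it takes a clip from the new AVS_Value, refreshes the cached clip and video info, and releases the value being replaced), a plausible reconstruction of the helper in x264's avs.c is the following sketch; treat it as an approximation rather than the verbatim source.

static AVS_Value update_clip( avs_hnd_t *h, const AVS_VideoInfo **vi, AVS_Value res, AVS_Value release )
{
    h->func.avs_release_clip( h->clip );            /* drop the clip built from the old value */
    h->clip = h->func.avs_take_clip( res, h->env ); /* take a clip from the new value */
    h->func.avs_release_value( release );           /* release the value being replaced */
    *vi = h->func.avs_get_video_info( h->clip );    /* refresh the cached video info */
    return res;
}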
Example #7
static int open_file( char *psz_filename, hnd_t *p_handle, video_info_t *info, cli_input_opt_t *opt )
{
    FILE *fh = fopen( psz_filename, "r" );
    if( !fh )
        return -1;
    FAIL_IF_ERROR( !x264_is_regular_file( fh ), "AVS input is incompatible with non-regular file `%s'\n", psz_filename );
    fclose( fh );

    avs_hnd_t *h = malloc( sizeof(avs_hnd_t) );
    if( !h )
        return -1;
    FAIL_IF_ERROR( x264_avs_load_library( h ), "failed to load avisynth\n" )
    h->env = h->func.avs_create_script_environment( AVS_INTERFACE_25 );
    FAIL_IF_ERROR( !h->env, "failed to initiate avisynth\n" )
    AVS_Value arg = avs_new_value_string( psz_filename );
    AVS_Value res;
    char *filename_ext = get_filename_extension( psz_filename );

    if( !strcasecmp( filename_ext, "avs" ) )
    {
        res = h->func.avs_invoke( h->env, "Import", arg, NULL );
        FAIL_IF_ERROR( avs_is_error( res ), "%s\n", avs_as_string( res ) )
        /* check if the user is using a multi-threaded script and apply distributor if necessary.
           adapted from avisynth's vfw interface */
        AVS_Value mt_test = h->func.avs_invoke( h->env, "GetMTMode", avs_new_value_bool( 0 ), NULL );
        int mt_mode = avs_is_int( mt_test ) ? avs_as_int( mt_test ) : 0;
        h->func.avs_release_value( mt_test );
        if( mt_mode > 0 && mt_mode < 5 )
        {
            AVS_Value temp = h->func.avs_invoke( h->env, "Distributor", res, NULL );
            h->func.avs_release_value( res );
            res = temp;
        }
    }
    else /* non script file */
    {
        /* cycle through known source filters to find one that works */
        const char *filter[AVS_MAX_SEQUENCE+1] = { 0 };
        avs_build_filter_sequence( filename_ext, filter );
        int i;
        for( i = 0; filter[i]; i++ )
        {
            x264_cli_log( "avs", X264_LOG_INFO, "trying %s... ", filter[i] );
            if( !h->func.avs_function_exists( h->env, filter[i] ) )
            {
                x264_cli_printf( X264_LOG_INFO, "not found\n" );
                continue;
            }
            if( !strncasecmp( filter[i], "FFmpegSource", 12 ) )
            {
                x264_cli_printf( X264_LOG_INFO, "indexing... " );
                fflush( stderr );
            }
            res = h->func.avs_invoke( h->env, filter[i], arg, NULL );
            if( !avs_is_error( res ) )
            {
                x264_cli_printf( X264_LOG_INFO, "succeeded\n" );
                break;
            }
            x264_cli_printf( X264_LOG_INFO, "failed\n" );
        }
        FAIL_IF_ERROR( !filter[i], "unable to find source filter to open `%s'\n", psz_filename )
    }
    FAIL_IF_ERROR( !avs_is_clip( res ), "`%s' didn't return a video clip\n", psz_filename )
    h->clip = h->func.avs_take_clip( res, h->env );
    const AVS_VideoInfo *vi = h->func.avs_get_video_info( h->clip );
    FAIL_IF_ERROR( !avs_has_video( vi ), "`%s' has no video data\n", psz_filename )
    /* if the clip is made of fields instead of frames, call weave to make them frames */
    if( avs_is_field_based( vi ) )
    {
        x264_cli_log( "avs", X264_LOG_WARNING, "detected fieldbased (separated) input, weaving to frames\n" );
        AVS_Value tmp = h->func.avs_invoke( h->env, "Weave", res, NULL );
        FAIL_IF_ERROR( avs_is_error( tmp ), "couldn't weave fields into frames\n" )
        res = update_clip( h, &vi, tmp, res );
        info->interlaced = 1;
        info->tff = avs_is_tff( vi );
    }
#if !HAVE_SWSCALE
    /* if swscale is not available, convert CSPs to yv12 */
    if( !avs_is_yv12( vi ) )
    {
        x264_cli_log( "avs", X264_LOG_WARNING, "converting input clip to YV12\n" );
        FAIL_IF_ERROR( vi->width&1 || vi->height&1, "input clip width or height not divisible by 2 (%dx%d)\n", vi->width, vi->height )
        const char *arg_name[2] = { NULL, "interlaced" };
        AVS_Value arg_arr[2] = { res, avs_new_value_bool( info->interlaced ) };
        AVS_Value res2 = h->func.avs_invoke( h->env, "ConvertToYV12", avs_new_value_array( arg_arr, 2 ), arg_name );
        FAIL_IF_ERROR( avs_is_error( res2 ), "couldn't convert input clip to YV12\n" )
        res = update_clip( h, &vi, res2, res );
    }