Code example #1
File: matroska_ebml.c Project: MasterNobody/x264
mk_writer *mk_create_writer( const char *filename )
{
    mk_writer *w = calloc( 1, sizeof(mk_writer) );
    if( !w )
        return NULL;

    w->root = mk_create_context( w, NULL, 0 );
    if( !w->root )
    {
        free( w );
        return NULL;
    }

    if( !strcmp( filename, "-" ) )
        w->fp = stdout;
    else
        w->fp = x264_fopen( filename, "wb" );
    if( !w->fp )
    {
        mk_destroy_contexts( w );
        free( w );
        return NULL;
    }

    w->timescale = 1000000;

    return w;
}
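
All of these examples open files through x264_fopen() rather than plain fopen(). The wrapper itself is not part of any excerpt on this page; purely as an illustration, a UTF-8-aware fopen replacement in the same spirit might look like the sketch below (utf8_fopen is a hypothetical name, and the Windows branch assumes the usual UTF-8 to UTF-16 conversion before _wfopen).

#include <stdio.h>
#ifdef _WIN32
#include <windows.h>
/* Sketch: convert a UTF-8 path to UTF-16 so that non-ASCII filenames work on
 * Windows, where the narrow-char fopen() uses the ANSI codepage. */
static FILE *utf8_fopen( const char *filename, const char *mode )
{
    wchar_t wfilename[MAX_PATH];
    wchar_t wmode[16];
    if( !MultiByteToWideChar( CP_UTF8, MB_ERR_INVALID_CHARS, filename, -1, wfilename, MAX_PATH ) ||
        !MultiByteToWideChar( CP_UTF8, MB_ERR_INVALID_CHARS, mode, -1, wmode, 16 ) )
        return NULL;
    return _wfopen( wfilename, wmode );
}
#else
/* Elsewhere the narrow fopen() already accepts UTF-8 byte strings. */
static FILE *utf8_fopen( const char *filename, const char *mode )
{
    return fopen( filename, mode );
}
#endif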
Code example #2
File: mp4_lsmash.c Project: JRuiChen/x264_svc
static int open_file( char *psz_filename, hnd_t *p_handle, cli_output_opt_t *opt )
{
    *p_handle = NULL;

    int b_regular = strcmp( psz_filename, "-" );
    b_regular = b_regular && x264_is_regular_file_path( psz_filename );
    if( b_regular )
    {
        FILE *fh = x264_fopen( psz_filename, "wb" );
        MP4_FAIL_IF_ERR( !fh, "cannot open output file `%s'.\n", psz_filename );
        b_regular = x264_is_regular_file( fh );
        fclose( fh );
    }

    mp4_hnd_t *p_mp4 = calloc( 1, sizeof(mp4_hnd_t) );
    MP4_FAIL_IF_ERR( !p_mp4, "failed to allocate memory for muxer information.\n" );

    p_mp4->b_dts_compress = opt->use_dts_compress;
    p_mp4->b_use_recovery = 0; // we don't really support recovery
    p_mp4->b_fragments    = !b_regular;
    p_mp4->b_stdout       = !strcmp( psz_filename, "-" );

    p_mp4->p_root = lsmash_open_movie( psz_filename, p_mp4->b_fragments ? LSMASH_FILE_MODE_WRITE_FRAGMENTED : LSMASH_FILE_MODE_WRITE );
    MP4_FAIL_IF_ERR_EX( !p_mp4->p_root, "failed to create root.\n" );

    p_mp4->summary = (lsmash_video_summary_t *)lsmash_create_summary( LSMASH_SUMMARY_TYPE_VIDEO );
    MP4_FAIL_IF_ERR_EX( !p_mp4->summary,
                        "failed to allocate memory for summary information of video.\n" );
    p_mp4->summary->sample_type = ISOM_CODEC_TYPE_AVC1_VIDEO;

    *p_handle = p_mp4;

    return 0;
}
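
MP4_FAIL_IF_ERR and MP4_FAIL_IF_ERR_EX are defined elsewhere in mp4_lsmash.c and are not shown in these excerpts. As an assumption-marked sketch, a fail-fast macro of this kind logs the message and bails out of open_file() with an error code, roughly as follows; the _EX variant presumably also tears down the partially initialised p_mp4 handle before returning.

/* Hypothetical sketch only -- not the verbatim x264 macro. */
#define MP4_FAIL_IF_ERR( cond, ... )\
    do\
    {\
        if( cond )\
        {\
            x264_cli_log( "mp4", X264_LOG_ERROR, __VA_ARGS__ );\
            return -1;\
        }\
    } while( 0 )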
Code example #3
File: mp4_lsmash.c Project: predmach/x264-devel
static int open_file( char *psz_filename, hnd_t *p_handle, cli_output_opt_t *opt )
{
    mp4_hnd_t *p_mp4;

    *p_handle = NULL;

    int b_regular = strcmp( psz_filename, "-" );
    b_regular = b_regular && x264_is_regular_file_path( psz_filename );
    if( b_regular )
    {
        FILE *fh = x264_fopen( psz_filename, "wb" );
        MP4_FAIL_IF_ERR( !fh, "cannot open output file `%s'.\n", psz_filename );
        b_regular = x264_is_regular_file( fh );
        fclose( fh );
    }

    p_mp4 = malloc( sizeof(mp4_hnd_t) );
    MP4_FAIL_IF_ERR( !p_mp4, "failed to allocate memory for muxer information.\n" );
    memset( p_mp4, 0, sizeof(mp4_hnd_t) );

    p_mp4->b_dts_compress = opt->use_dts_compress;
    p_mp4->b_use_recovery = 0;
    p_mp4->b_no_pasp      = 0;
    p_mp4->scale_method   = ISOM_SCALE_METHOD_MEET;
    p_mp4->b_fragments    = !b_regular;
    p_mp4->b_stdout       = !strcmp( psz_filename, "-" );

    char* ext = get_filename_extension( psz_filename );
    if( !strcmp( ext, "mov" ) || !strcmp( ext, "qt" ) )
    {
        p_mp4->major_brand = ISOM_BRAND_TYPE_QT;
        p_mp4->b_brand_qt = 1;
    }
    else if( !strcmp( ext, "3gp" ) )
    {
        p_mp4->major_brand = ISOM_BRAND_TYPE_3GP6;
        p_mp4->i_brand_3gpp = 1;
    }
    else if( !strcmp( ext, "3g2" ) )
    {
        p_mp4->major_brand = ISOM_BRAND_TYPE_3G2A;
        p_mp4->i_brand_3gpp = 2;
    }
    else
        p_mp4->major_brand = ISOM_BRAND_TYPE_MP42;

    p_mp4->p_root = lsmash_open_movie( psz_filename, p_mp4->b_fragments ? LSMASH_FILE_MODE_WRITE_FRAGMENTED : LSMASH_FILE_MODE_WRITE );
    MP4_FAIL_IF_ERR_EX( !p_mp4->p_root, "failed to create root.\n" );

    p_mp4->summary = (lsmash_video_summary_t *)lsmash_create_summary( LSMASH_SUMMARY_TYPE_VIDEO );
    MP4_FAIL_IF_ERR_EX( !p_mp4->summary,
                        "failed to allocate memory for summary information of video.\n" );
    p_mp4->summary->sample_type = ISOM_CODEC_TYPE_AVC1_VIDEO;

    *p_handle = p_mp4;

    return 0;
}
Code example #4
File: raw.c Project: JRuiChen/x264_svc_new
static int open_file( char *psz_filename, hnd_t *p_handle, cli_output_opt_t *opt )
{
    if( !strcmp( psz_filename, "-" ) )
        *p_handle = stdout;
    else if( !(*p_handle = x264_fopen( psz_filename, "w+b" )) )
        return -1;

    return 0;
}
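
This output module's write path is not included here. For illustration only, a minimal write routine for such a handle would simply fwrite the encoded bytes to the FILE* stored in the handle; write_frame_sketch and its exact signature are assumptions, not the x264 implementation.

#include <stdio.h>
#include <stdint.h>

/* Illustrative sketch: dump an encoded NAL payload to the opaque handle and
 * report the number of bytes written, or -1 on failure. */
static int write_frame_sketch( void *handle, uint8_t *p_nalu, int i_size )
{
    if( fwrite( p_nalu, i_size, 1, (FILE*)handle ) != 1 )
        return -1;
    return i_size;
}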
Code example #5
File: opencl.c Project: markjreed/vice-emu
/* Try to load the cached compiled program binary, verify the device context is
 * still valid before reuse */
static cl_program x264_opencl_cache_load( x264_t *h, const char *dev_name, const char *dev_vendor, const char *driver_version )
{
    /* try to load cached program binary */
    FILE *fp = x264_fopen( h->param.psz_clbin_file, "rb" );
    x264_opencl_function_t *ocl;
    cl_program program;
    uint8_t *binary;
    size_t size;
    const uint8_t *ptr;
    cl_int status;

    if( !fp )
        return NULL;

    ocl = h->opencl.ocl;
    program = NULL;
    binary = NULL;

    fseek( fp, 0, SEEK_END );
    size = ftell( fp );
    rewind( fp );
    CHECKED_MALLOC( binary, size );

    if( fread( binary, 1, size, fp ) != size )
        goto fail;
    ptr = (const uint8_t*)binary;

#define CHECK_STRING( STR )\
    do {\
        size_t len = strlen( STR );\
        if( size <= len || strncmp( (char*)ptr, STR, len ) )\
            goto fail;\
        else {\
            size -= (len+1); ptr += (len+1);\
        }\
    } while( 0 )

    CHECK_STRING( dev_name );
    CHECK_STRING( dev_vendor );
    CHECK_STRING( driver_version );
    CHECK_STRING( x264_opencl_source_hash );
#undef CHECK_STRING

    program = ocl->clCreateProgramWithBinary( h->opencl.context, 1, &h->opencl.device, &size, &ptr, NULL, &status );
    if( status != CL_SUCCESS )
        program = NULL;

fail:
    fclose( fp );
    x264_free( binary );
    return program;
}
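
Two helpers used above are defined elsewhere in x264 and are not reproduced here. Note how the cache is validated: the device name, vendor, driver version and kernel source hash are stored as newline-terminated strings ahead of the program binary, and CHECK_STRING rejects the file if any of them has changed (see the matching writer in the next example). CHECKED_MALLOC follows the common allocate-or-goto-fail pattern; a sketch of that pattern, not the verbatim x264 definition:

/* Sketch of the CHECKED_MALLOC pattern: allocate, and on failure jump to the
 * caller's local fail: cleanup label. */
#define CHECKED_MALLOC( var, size )\
    do\
    {\
        var = x264_malloc( size );\
        if( !var )\
            goto fail;\
    } while( 0 )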
Code example #6
File: opencl.c Project: markjreed/vice-emu
/* Save the compiled program binary to a file for later reuse.  Device context
 * is also saved in the cache file so we do not reuse stale binaries */
static void x264_opencl_cache_save( x264_t *h, cl_program program, const char *dev_name, const char *dev_vendor, const char *driver_version )
{
    FILE *fp = x264_fopen( h->param.psz_clbin_file, "wb" );
    x264_opencl_function_t *ocl;
    uint8_t *binary;
    size_t size;
    cl_int status;

    if( !fp )
    {
        x264_log( h, X264_LOG_INFO, "OpenCL: unable to open clbin file for write\n" );
        return;
    }

    ocl = h->opencl.ocl;
    binary = NULL;

    size = 0;
    status = ocl->clGetProgramInfo( program, CL_PROGRAM_BINARY_SIZES, sizeof(size_t), &size, NULL );
    if( status != CL_SUCCESS || !size )
    {
        x264_log( h, X264_LOG_INFO, "OpenCL: Unable to query program binary size, no cache file generated\n" );
        goto fail;
    }

    CHECKED_MALLOC( binary, size );
    status = ocl->clGetProgramInfo( program, CL_PROGRAM_BINARIES, sizeof(uint8_t *), &binary, NULL );
    if( status != CL_SUCCESS )
    {
        x264_log( h, X264_LOG_INFO, "OpenCL: Unable to query program binary, no cache file generated\n" );
        goto fail;
    }

    fputs( dev_name, fp );
    fputc( '\n', fp );
    fputs( dev_vendor, fp );
    fputc( '\n', fp );
    fputs( driver_version, fp );
    fputc( '\n', fp );
    fputs( x264_opencl_source_hash, fp );
    fputc( '\n', fp );
    fwrite( binary, 1, size, fp );

fail:
    fclose( fp );
    x264_free( binary );
    return;
}
Code example #7
File: mp4.c Project: predmach/x264-devel
static int open_file( char *psz_filename, hnd_t *p_handle, cli_output_opt_t *opt )
{
    mp4_hnd_t *p_mp4;

    *p_handle = NULL;
    FILE *fh = x264_fopen( psz_filename, "w" );
    if( !fh )
        return -1;
    FAIL_IF_ERR( !x264_is_regular_file( fh ), "mp4", "MP4 output is incompatible with non-regular file `%s'\n", psz_filename )
    fclose( fh );

    if( !(p_mp4 = malloc( sizeof(mp4_hnd_t) )) )
        return -1;

    memset( p_mp4, 0, sizeof(mp4_hnd_t) );

#ifdef _WIN32
    /* GPAC doesn't support Unicode filenames. */
    char ansi_filename[MAX_PATH];
    FAIL_IF_ERR( !x264_ansi_filename( psz_filename, ansi_filename, MAX_PATH, 1 ), "mp4", "invalid ansi filename\n" )
    p_mp4->p_file = gf_isom_open( ansi_filename, GF_ISOM_OPEN_WRITE, NULL );
#else
    p_mp4->p_file = gf_isom_open( psz_filename, GF_ISOM_OPEN_WRITE, NULL );
#endif

    p_mp4->b_dts_compress = opt->use_dts_compress;

    if( !(p_mp4->p_sample = gf_isom_sample_new()) )
    {
        close_file( p_mp4, 0, 0 );
        return -1;
    }

    gf_isom_set_brand_info( p_mp4->p_file, GF_ISOM_BRAND_AVC1, 0 );

    *p_handle = p_mp4;

    return 0;
}
Code example #8
File: raw.c Project: 12307/PushRTMPStreamSync
static int open_file( char *psz_filename, hnd_t *p_handle, video_info_t *info, cli_input_opt_t *opt )
{
    raw_hnd_t *h = calloc( 1, sizeof(raw_hnd_t) );
    if( !h )
        return -1;

    if( !opt->resolution )
    {
        /* try to parse the file name */
        for( char *p = psz_filename; *p; p++ )
            if( *p >= '0' && *p <= '9' && sscanf( p, "%dx%d", &info->width, &info->height ) == 2 )
                break;
    }
    else
        sscanf( opt->resolution, "%dx%d", &info->width, &info->height );
    FAIL_IF_ERROR( !info->width || !info->height, "raw input requires a resolution.\n" )
    if( opt->colorspace )
    {
        for( info->csp = X264_CSP_CLI_MAX-1; info->csp > X264_CSP_NONE; info->csp-- )
        {
            if( x264_cli_csps[info->csp].name && !strcasecmp( x264_cli_csps[info->csp].name, opt->colorspace ) )
                break;
        }
        FAIL_IF_ERROR( info->csp == X264_CSP_NONE, "unsupported colorspace `%s'\n", opt->colorspace );
    }
    else /* default */
        info->csp = X264_CSP_I420;

    h->bit_depth = opt->bit_depth;
    FAIL_IF_ERROR( h->bit_depth < 8 || h->bit_depth > 16, "unsupported bit depth `%d'\n", h->bit_depth );
    if( h->bit_depth > 8 )
        info->csp |= X264_CSP_HIGH_DEPTH;

    if( !strcmp( psz_filename, "-" ) )
        h->fh = stdin;
    else
        h->fh = x264_fopen( psz_filename, "rb" );
    if( h->fh == NULL )
        return -1;

    info->thread_safe = 1;
    info->num_frames  = 0;
    info->vfr         = 0;

    const x264_cli_csp_t *csp = x264_cli_get_csp( info->csp );
    for( int i = 0; i < csp->planes; i++ )
    {
        h->plane_size[i] = x264_cli_pic_plane_size( info->csp, info->width, info->height, i );
        h->frame_size += h->plane_size[i];
        /* x264_cli_pic_plane_size returns the size in bytes, we need the value in pixels from here on */
        h->plane_size[i] /= x264_cli_csp_depth_factor( info->csp );
    }

    if( x264_is_regular_file( h->fh ) )
    {
        fseek( h->fh, 0, SEEK_END );
        uint64_t size = ftell( h->fh );
        fseek( h->fh, 0, SEEK_SET );
        info->num_frames = size / h->frame_size;

        /* Attempt to use memory-mapped input frames if possible */
        if( !(h->bit_depth & 7) )
            h->use_mmap = !x264_cli_mmap_init( &h->mmap, h->fh );
    }

    *p_handle = h;
    return 0;
}
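
Several of these modules gate seek-based behaviour (frame counting, non-fragmented MP4 output, memory mapping) on x264_is_regular_file(), which is not reproduced on this page. The usual way to implement such a check is an fstat() on the underlying descriptor; a sketch under that assumption, not the verbatim x264 helper:

#include <stdio.h>
#include <sys/stat.h>

/* Sketch: report whether a stream refers to a regular, seekable file rather
 * than a pipe, terminal or device node. */
static int is_regular_file_sketch( FILE *fh )
{
    struct stat st;
    if( fstat( fileno( fh ), &st ) )
        return 0;
    return S_ISREG( st.st_mode );
}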
Code example #9
File: opencl.c Project: markjreed/vice-emu
/* The OpenCL source under common/opencl will be merged into common/oclobj.h by
 * the Makefile. It defines a x264_opencl_source byte array which we will pass
 * to clCreateProgramWithSource().  We also attempt to use a cache file for the
 * compiled binary, stored in the current working folder. */
static cl_program x264_opencl_compile( x264_t *h )
{
    x264_opencl_function_t *ocl = h->opencl.ocl;
    cl_program program = NULL;
    char *build_log = NULL;

    char dev_name[64];
    char dev_vendor[64];
    char driver_version[64];
    cl_int status;
    int vectorize;
    const char *buildopts;
    size_t build_log_len;
    FILE *log_file;

    status  = ocl->clGetDeviceInfo( h->opencl.device, CL_DEVICE_NAME,    sizeof(dev_name), dev_name, NULL );
    status |= ocl->clGetDeviceInfo( h->opencl.device, CL_DEVICE_VENDOR,  sizeof(dev_vendor), dev_vendor, NULL );
    status |= ocl->clGetDeviceInfo( h->opencl.device, CL_DRIVER_VERSION, sizeof(driver_version), driver_version, NULL );
    if( status != CL_SUCCESS )
        return NULL;

    // Most AMD GPUs have vector registers
    vectorize = !strcmp( dev_vendor, "Advanced Micro Devices, Inc." );
    h->opencl.b_device_AMD_SI = 0;

    if( vectorize )
    {
        cl_uint simdwidth;

        /* Disable OpenCL on Intel/AMD switchable graphics devices */
        if( x264_detect_switchable_graphics() )
        {
            x264_log( h, X264_LOG_INFO, "OpenCL acceleration disabled, switchable graphics detected\n" );
            return NULL;
        }

        /* Detect AMD SouthernIsland or newer device (single-width registers) */
        simdwidth = 4;
        status = ocl->clGetDeviceInfo( h->opencl.device, CL_DEVICE_SIMD_INSTRUCTION_WIDTH_AMD, sizeof(cl_uint), &simdwidth, NULL );
        if( status == CL_SUCCESS && simdwidth == 1 )
        {
            vectorize = 0;
            h->opencl.b_device_AMD_SI = 1;
        }
    }

    x264_log( h, X264_LOG_INFO, "OpenCL acceleration enabled with %s %s %s\n", dev_vendor, dev_name, h->opencl.b_device_AMD_SI ? "(SI)" : "" );

    program = x264_opencl_cache_load( h, dev_name, dev_vendor, driver_version );
    if( !program )
    {
        const char *strptr;
        size_t size;

        /* clCreateProgramWithSource() requires a pointer variable, you cannot just use &x264_opencl_source */
        x264_log( h, X264_LOG_INFO, "Compiling OpenCL kernels...\n" );
        strptr = (const char*)x264_opencl_source;
        size = sizeof(x264_opencl_source);
        program = ocl->clCreateProgramWithSource( h->opencl.context, 1, &strptr, &size, &status );
        if( status != CL_SUCCESS || !program )
        {
            x264_log( h, X264_LOG_WARNING, "OpenCL: unable to create program\n" );
            return NULL;
        }
    }

    /* Build the program binary for the OpenCL device */
    buildopts = vectorize ? "-DVECTORIZE=1" : "";
    status = ocl->clBuildProgram( program, 1, &h->opencl.device, buildopts, NULL, NULL );
    if( status == CL_SUCCESS )
    {
        x264_opencl_cache_save( h, program, dev_name, dev_vendor, driver_version );
        return program;
    }

    /* Compile failure, should not happen with production code. */

    build_log_len = 0;
    status = ocl->clGetProgramBuildInfo( program, h->opencl.device, CL_PROGRAM_BUILD_LOG, 0, NULL, &build_log_len );
    if( status != CL_SUCCESS || !build_log_len )
    {
        x264_log( h, X264_LOG_WARNING, "OpenCL: Compilation failed, unable to query build log\n" );
        goto fail;
    }

    build_log = x264_malloc( build_log_len );
    if( !build_log )
    {
        x264_log( h, X264_LOG_WARNING, "OpenCL: Compilation failed, unable to alloc build log\n" );
        goto fail;
    }

    status = ocl->clGetProgramBuildInfo( program, h->opencl.device, CL_PROGRAM_BUILD_LOG, build_log_len, build_log, NULL );
    if( status != CL_SUCCESS )
    {
        x264_log( h, X264_LOG_WARNING, "OpenCL: Compilation failed, unable to get build log\n" );
        goto fail;
    }

    log_file = x264_fopen( "x264_kernel_build_log.txt", "w" );
    if( !log_file )
    {
        x264_log( h, X264_LOG_WARNING, "OpenCL: Compilation failed, unable to create file x264_kernel_build_log.txt\n" );
        goto fail;
    }
    fwrite( build_log, 1, build_log_len, log_file );
    fclose( log_file );
    x264_log( h, X264_LOG_WARNING, "OpenCL: kernel build errors written to x264_kernel_build_log.txt\n" );

fail:
    x264_free( build_log );
    if( program )
        ocl->clReleaseProgram( program );
    return NULL;
}
Code example #10
File: y4m.c Project: r-type/vice-libretro
static int open_file( char *psz_filename, hnd_t *p_handle, video_info_t *info, cli_input_opt_t *opt )
{
    y4m_hnd_t *h = malloc( sizeof(y4m_hnd_t) );
    int i;
    uint32_t n, d;
    char header[MAX_YUV4_HEADER+10];
    char *tokend, *header_end;
    int colorspace = X264_CSP_NONE;
    int alt_colorspace = X264_CSP_NONE;
    int alt_bit_depth  = 8;
    char *tokstart;
    const x264_cli_csp_t *csp;

    if( !h )
        return -1;

    h->next_frame = 0;
    info->vfr = 0;

    if( !strcmp( psz_filename, "-" ) )
        h->fh = stdin;
    else
        h->fh = x264_fopen(psz_filename, "rb");
    if( h->fh == NULL )
        return -1;

    h->frame_header_len = strlen( Y4M_FRAME_MAGIC )+1;

    /* Read header */
    for( i = 0; i < MAX_YUV4_HEADER; i++ )
    {
        header[i] = fgetc( h->fh );
        if( header[i] == '\n' )
        {
            /* Add a space after last option. Makes parsing "444" vs
               "444alpha" easier. */
            header[i+1] = 0x20;
            header[i+2] = 0;
            break;
        }
    }
    if( i == MAX_YUV4_HEADER || strncmp( header, Y4M_MAGIC, strlen( Y4M_MAGIC ) ) )
        return -1;

    /* Scan properties */
    header_end = &header[i+1]; /* Include space */
    h->seq_header_len = i+1;
    for( tokstart = &header[strlen( Y4M_MAGIC )+1]; tokstart < header_end; tokstart++ )
    {
        if( *tokstart == 0x20 )
            continue;
        switch( *tokstart++ )
        {
            case 'W': /* Width. Required. */
                info->width = strtol( tokstart, &tokend, 10 );
                tokstart=tokend;
                break;
            case 'H': /* Height. Required. */
                info->height = strtol( tokstart, &tokend, 10 );
                tokstart=tokend;
                break;
            case 'C': /* Color space */
                colorspace = parse_csp_and_depth( tokstart, &h->bit_depth );
                tokstart = strchr( tokstart, 0x20 );
                break;
            case 'I': /* Interlace type */
                switch( *tokstart++ )
                {
                    case 't':
                        info->interlaced = 1;
                        info->tff = 1;
                        break;
                    case 'b':
                        info->interlaced = 1;
                        info->tff = 0;
                        break;
                    case 'm':
                        info->interlaced = 1;
                        break;
                    //case '?':
                    //case 'p':
                    default:
                        break;
                }
                break;
            case 'F': /* Frame rate - 0:0 if unknown */
                if( sscanf( tokstart, "%u:%u", &n, &d ) == 2 && n && d )
                {
                    x264_reduce_fraction( &n, &d );
                    info->fps_num = n;
                    info->fps_den = d;
                }
                tokstart = strchr( tokstart, 0x20 );
                break;
            case 'A': /* Pixel aspect - 0:0 if unknown */
                /* Don't override the aspect ratio if sar has been explicitly set on the commandline. */
                if( sscanf( tokstart, "%u:%u", &n, &d ) == 2 && n && d )
                {
                    x264_reduce_fraction( &n, &d );
                    info->sar_width  = n;
                    info->sar_height = d;
                }
                tokstart = strchr( tokstart, 0x20 );
                break;
            case 'X': /* Vendor extensions */
                if( !strncmp( "YSCSS=", tokstart, 6 ) )
                {
                    /* Older nonstandard pixel format representation */
                    tokstart += 6;
                    alt_colorspace = parse_csp_and_depth( tokstart, &alt_bit_depth );
                }
                tokstart = strchr( tokstart, 0x20 );
                break;
        }
    }

    if( colorspace == X264_CSP_NONE )
    {
        colorspace   = alt_colorspace;
        h->bit_depth = alt_bit_depth;
    }

    // default to 8bit 4:2:0 if nothing is specified
    if( colorspace == X264_CSP_NONE )
    {
        colorspace    = X264_CSP_I420;
        h->bit_depth  = 8;
    }

#if !defined(IDE_COMPILE) || (defined(IDE_COMPILE) && (_MSC_VER >= 1400))
    FAIL_IF_ERROR( colorspace <= X264_CSP_NONE || colorspace >= X264_CSP_MAX, "colorspace unhandled\n" )
    FAIL_IF_ERROR( h->bit_depth < 8 || h->bit_depth > 16, "unsupported bit depth `%d'\n", h->bit_depth );
#else
    if( colorspace <= X264_CSP_NONE || colorspace >= X264_CSP_MAX )
    {
        x264_cli_log( "y4m", 0, "colorspace unhandled\n" );
        return -1;
    }
    if( h->bit_depth < 8 || h->bit_depth > 16 )
    {
        x264_cli_log( "y4m", 0, "unsupported bit depth `%d'\n", h->bit_depth );
        return -1;
    }
#endif

    info->thread_safe = 1;
    info->num_frames  = 0;
    info->csp         = colorspace;
    h->frame_size     = h->frame_header_len;

    if( h->bit_depth > 8 )
        info->csp |= X264_CSP_HIGH_DEPTH;

    csp = x264_cli_get_csp( info->csp );

    for( i = 0; i < csp->planes; i++ )
    {
        h->plane_size[i] = x264_cli_pic_plane_size( info->csp, info->width, info->height, i );
        h->frame_size += h->plane_size[i];
        /* x264_cli_pic_plane_size returns the size in bytes, we need the value in pixels from here on */
        h->plane_size[i] /= x264_cli_csp_depth_factor( info->csp );
    }

    /* Most common case: frame_header = "FRAME" */
    if( x264_is_regular_file( h->fh ) )
    {
        uint64_t init_pos = ftell( h->fh );
        uint64_t i_size;

        fseek( h->fh, 0, SEEK_END );
        i_size = ftell( h->fh );
        fseek( h->fh, init_pos, SEEK_SET );
        info->num_frames = (i_size - h->seq_header_len) / h->frame_size;
    }

    *p_handle = h;
    return 0;
}
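
parse_csp_and_depth() is not part of this excerpt. As a simplified, assumption-marked sketch of what such a helper does with the Y4M "C" tag (e.g. 420mpeg2, 422p10, mono), it maps the token to an x264 CSP constant and a bit depth; the real function also handles terminators inside the header buffer and more colorspace variants. The sketch assumes string.h, stdio.h and the X264_CSP_* constants are in scope and that csp_name is a NUL-terminated copy of the token.

/* Simplified sketch only -- not the real x264 helper. */
static int parse_csp_and_depth_sketch( const char *csp_name, int *bit_depth )
{
    int csp = X264_CSP_NONE;
    *bit_depth = 8;

    if( !strncmp( csp_name, "420", 3 ) )
        csp = X264_CSP_I420;
    else if( !strncmp( csp_name, "422", 3 ) )
        csp = X264_CSP_I422;
    else if( !strncmp( csp_name, "444", 3 ) && strncmp( csp_name, "444alpha", 8 ) )
        csp = X264_CSP_I444;
    else if( !strncmp( csp_name, "mono", 4 ) )
        csp = X264_CSP_I400;

    /* Optional high-depth suffix such as "p10" or "p16". */
    const char *p = strrchr( csp_name, 'p' );
    if( p && sscanf( p, "p%d", bit_depth ) != 1 )
        *bit_depth = 8;

    return csp;
}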
Code example #11
static int open_file( char *psz_filename, hnd_t *p_handle, video_info_t *info, cli_input_opt_t *opt )
{
    vs_hnd_t *h = new vs_hnd_t;
    if( !h )
        return -1;

    FILE *fh = x264_fopen(psz_filename, "rb");
    if (!fh)
        return -1;
    int b_regular = x264_is_regular_file(fh);
    fclose(fh);
    FAIL_IF_ERROR(!b_regular, "VS input is incompatible with non-regular file `%s'\n", psz_filename);

    FAIL_IF_ERROR(!vsscript_init(), "Failed to initialize VapourSynth environment\n");
    h->vsapi = vsscript_getVSApi();
    if (!h->vsapi) {
        fprintf(stderr, "Failed to get VapourSynth API pointer\n");
        vsscript_finalize();
        return -1;
    }

    // Should always succeed
    if (vsscript_createScript(&h->se)) {
        fprintf(stderr, "Script environment initialization failed:\n%s\n", vsscript_getError(h->se));
        vsscript_freeScript(h->se);
        vsscript_finalize();
        return -1;
    }

    std::string strfilename = psz_filename;
    nstring scriptFilename = s2ws(strfilename);
    if (vsscript_evaluateFile(&h->se, nstringToUtf8(scriptFilename).c_str(), efSetWorkingDir)) {
        fprintf(stderr, "Script evaluation failed:\n%s\n", vsscript_getError(h->se));
        vsscript_freeScript(h->se);
        vsscript_finalize();
        return -1;
    }

    h->node = vsscript_getOutput(h->se, 0); // outputIndex
    if (!h->node) {
        fprintf(stderr, "Failed to retrieve output node. Invalid index specified?\n");
        vsscript_freeScript(h->se);
        vsscript_finalize();
        return -1;
    }

    const VSCoreInfo *vsInfo = h->vsapi->getCoreInfo(vsscript_getCore(h->se));
    h->sea = new semaphore(vsInfo->numThreads);

    const VSVideoInfo *vi = h->vsapi->getVideoInfo(h->node);
    if (vi->format->colorFamily != cmYUV) {
        fprintf(stderr, "Can only read YUV format clips\n");
        h->vsapi->freeNode(h->node);
        vsscript_freeScript(h->se);
        vsscript_finalize();
        return -1;
    }

    if (!isConstantFormat(vi)) {
        fprintf(stderr, "Cannot output clips with varying dimensions\n");
        h->vsapi->freeNode(h->node);
        vsscript_freeScript(h->se);
        vsscript_finalize();
        return -1;
    }

    info->width = vi->width;
    info->height = vi->height;
    info->fps_num = vi->fpsNum;
    info->fps_den = vi->fpsDen;
    info->thread_safe = 1;
    info->num_frames = vi->numFrames;

    if (vi->format->subSamplingW == 1 && vi->format->subSamplingH == 1)
        info->csp = X264_CSP_I420;
    else if (vi->format->subSamplingW == 1 && vi->format->subSamplingH == 0)
        info->csp = X264_CSP_I422;
    else if (vi->format->subSamplingW == 0 && vi->format->subSamplingH == 0)
        info->csp = X264_CSP_I444;

    h->bit_depth = vi->format->bitsPerSample;
    if (h->bit_depth > 8)
    {
        info->csp |= X264_CSP_HIGH_DEPTH;
    }

    *p_handle = (void*)h;
    return 0;
}
Code example #12
File: avs.c Project: xing2fan/x264
static int open_file( char *psz_filename, hnd_t *p_handle, video_info_t *info, cli_input_opt_t *opt )
{
    FILE *fh = x264_fopen( psz_filename, "r" );
    if( !fh )
        return -1;
    int b_regular = x264_is_regular_file( fh );
    fclose( fh );
    FAIL_IF_ERROR( !b_regular, "AVS input is incompatible with non-regular file `%s'\n", psz_filename );

    avs_hnd_t *h = calloc( 1, sizeof(avs_hnd_t) );
    if( !h )
        return -1;
    FAIL_IF_ERROR( custom_avs_load_library( h ), "failed to load avisynth\n" );
    h->env = h->func.avs_create_script_environment( AVS_INTERFACE_25 );
    if( h->func.avs_get_error )
    {
        const char *error = h->func.avs_get_error( h->env );
        FAIL_IF_ERROR( error, "%s\n", error );
    }
    float avs_version = get_avs_version( h );
    if( avs_version <= 0 )
        return -1;
    x264_cli_log( "avs", X264_LOG_DEBUG, "using avisynth version %.2f\n", avs_version );

#ifdef _WIN32
    /* Avisynth doesn't support Unicode filenames. */
    char ansi_filename[MAX_PATH];
    FAIL_IF_ERROR( !x264_ansi_filename( psz_filename, ansi_filename, MAX_PATH, 0 ), "invalid ansi filename\n" );
    AVS_Value arg = avs_new_value_string( ansi_filename );
#else
    AVS_Value arg = avs_new_value_string( psz_filename );
#endif

    AVS_Value res;
    char *filename_ext = get_filename_extension( psz_filename );

    if( !strcasecmp( filename_ext, "avs" ) )
    {
        res = h->func.avs_invoke( h->env, "Import", arg, NULL );
        FAIL_IF_ERROR( avs_is_error( res ), "%s\n", avs_as_error( res ) );
        /* check if the user is using a multi-threaded script and apply distributor if necessary.
           adapted from avisynth's vfw interface */
        AVS_Value mt_test = h->func.avs_invoke( h->env, "GetMTMode", avs_new_value_bool( 0 ), NULL );
        int mt_mode = avs_is_int( mt_test ) ? avs_as_int( mt_test ) : 0;
        h->func.avs_release_value( mt_test );
        if( mt_mode > 0 && mt_mode < 5 )
        {
            AVS_Value temp = h->func.avs_invoke( h->env, "Distributor", res, NULL );
            h->func.avs_release_value( res );
            res = temp;
        }
    }
    else /* non script file */
    {
        /* cycle through known source filters to find one that works */
        const char *filter[AVS_MAX_SEQUENCE+1] = { 0 };
        avs_build_filter_sequence( filename_ext, filter );
        int i;
        for( i = 0; filter[i]; i++ )
        {
            x264_cli_log( "avs", X264_LOG_INFO, "trying %s... ", filter[i] );
            if( !h->func.avs_function_exists( h->env, filter[i] ) )
            {
                x264_cli_printf( X264_LOG_INFO, "not found\n" );
                continue;
            }
            if( !strncasecmp( filter[i], "FFmpegSource", 12 ) )
            {
                x264_cli_printf( X264_LOG_INFO, "indexing... " );
                fflush( stderr );
            }
            res = h->func.avs_invoke( h->env, filter[i], arg, NULL );
            if( !avs_is_error( res ) )
            {
                x264_cli_printf( X264_LOG_INFO, "succeeded\n" );
                break;
            }
            x264_cli_printf( X264_LOG_INFO, "failed\n" );
        }
        FAIL_IF_ERROR( !filter[i], "unable to find source filter to open `%s'\n", psz_filename );
    }
    FAIL_IF_ERROR( !avs_is_clip( res ), "`%s' didn't return a video clip\n", psz_filename );
    h->clip = h->func.avs_take_clip( res, h->env );
    const AVS_VideoInfo *vi = h->func.avs_get_video_info( h->clip );
    FAIL_IF_ERROR( !avs_has_video( vi ), "`%s' has no video data\n", psz_filename );
    /* if the clip is made of fields instead of frames, call weave to make them frames */
    if( avs_is_field_based( vi ) )
    {
        x264_cli_log( "avs", X264_LOG_WARNING, "detected fieldbased (separated) input, weaving to frames\n" );
        AVS_Value tmp = h->func.avs_invoke( h->env, "Weave", res, NULL );
        FAIL_IF_ERROR( avs_is_error( tmp ), "couldn't weave fields into frames: %s\n", avs_as_error( tmp ) );
        res = update_clip( h, &vi, tmp, res );
        info->interlaced = 1;
        info->tff = avs_is_tff( vi );
    }
#if !HAVE_SWSCALE
    /* if swscale is not available, convert the CSP if necessary */
    FAIL_IF_ERROR( avs_version < 2.6f && (opt->output_csp == X264_CSP_I400 || opt->output_csp == X264_CSP_I422 || opt->output_csp == X264_CSP_I444),
                   "avisynth >= 2.6 is required for i400/i422/i444 output\n" );
    if( (opt->output_csp == X264_CSP_I400 && !AVS_IS_Y( vi )) ||
        (opt->output_csp == X264_CSP_I420 && !AVS_IS_420( vi )) ||
        (opt->output_csp == X264_CSP_I422 && !AVS_IS_422( vi )) ||
        (opt->output_csp == X264_CSP_I444 && !AVS_IS_444( vi )) ||
        (opt->output_csp == X264_CSP_RGB && !avs_is_rgb( vi )) )
    {
        const char *csp;
        if( AVS_IS_AVISYNTHPLUS )
        {
            csp = opt->output_csp == X264_CSP_I400 ? "Y" :
                  opt->output_csp == X264_CSP_I420 ? "YUV420" :
                  opt->output_csp == X264_CSP_I422 ? "YUV422" :
                  opt->output_csp == X264_CSP_I444 ? "YUV444" :
                  "RGB";
        }
        else
        {
            csp = opt->output_csp == X264_CSP_I400 ? "Y8" :
                  opt->output_csp == X264_CSP_I420 ? "YV12" :
                  opt->output_csp == X264_CSP_I422 ? "YV16" :
                  opt->output_csp == X264_CSP_I444 ? "YV24" :
                  "RGB";
        }
        x264_cli_log( "avs", X264_LOG_WARNING, "converting input clip to %s\n", csp );
        if( opt->output_csp != X264_CSP_I400 )
        {
            FAIL_IF_ERROR( opt->output_csp < X264_CSP_I444 && (vi->width&1),
                           "input clip width not divisible by 2 (%dx%d)\n", vi->width, vi->height );
            FAIL_IF_ERROR( opt->output_csp == X264_CSP_I420 && info->interlaced && (vi->height&3),
                           "input clip height not divisible by 4 (%dx%d)\n", vi->width, vi->height );
            FAIL_IF_ERROR( (opt->output_csp == X264_CSP_I420 || info->interlaced) && (vi->height&1),
                           "input clip height not divisible by 2 (%dx%d)\n", vi->width, vi->height );
        }
        char conv_func[16];
        snprintf( conv_func, sizeof(conv_func), "ConvertTo%s", csp );
        AVS_Value arg_arr[3];
        const char *arg_name[3];
        int arg_count = 1;
        arg_arr[0] = res;
        arg_name[0] = NULL;
        if( opt->output_csp != X264_CSP_I400 )
        {
            arg_arr[arg_count] = avs_new_value_bool( info->interlaced );
            arg_name[arg_count] = "interlaced";
            arg_count++;
        }
        /* if doing a rgb <-> yuv conversion then range is handled via 'matrix'. though it's only supported in 2.56+ */
        char matrix[7];
        if( avs_version >= 2.56f && ((opt->output_csp == X264_CSP_RGB && avs_is_yuv( vi )) || (opt->output_csp != X264_CSP_RGB && avs_is_rgb( vi ))) )
        {
            // if converting from yuv, then we specify the matrix for the input, otherwise use the output's.
            int use_pc_matrix = avs_is_yuv( vi ) ? opt->input_range == RANGE_PC : opt->output_range == RANGE_PC;
            snprintf( matrix, sizeof(matrix), "%s601", use_pc_matrix ? "PC." : "Rec" ); /* FIXME: use correct coefficients */
            arg_arr[arg_count] = avs_new_value_string( matrix );
            arg_name[arg_count] = "matrix";
            arg_count++;
            // notification that the input range has changed to the desired one
            opt->input_range = opt->output_range;
        }
        AVS_Value res2 = h->func.avs_invoke( h->env, conv_func, avs_new_value_array( arg_arr, arg_count ), arg_name );
        FAIL_IF_ERROR( avs_is_error( res2 ), "couldn't convert input clip to %s: %s\n", csp, avs_as_error( res2 ) );
        res = update_clip( h, &vi, res2, res );
    }
    /* if swscale is not available, change the range if necessary. This only applies to YUV-based CSPs however */
    if( avs_is_yuv( vi ) && opt->output_range != RANGE_AUTO && ((opt->input_range == RANGE_PC) != opt->output_range) )
    {
        const char *levels = opt->output_range ? "TV->PC" : "PC->TV";
        x264_cli_log( "avs", X264_LOG_WARNING, "performing %s conversion\n", levels );
        AVS_Value arg_arr[2];
        arg_arr[0] = res;
        arg_arr[1] = avs_new_value_string( levels );
        const char *arg_name[] = { NULL, "levels" };
        AVS_Value res2 = h->func.avs_invoke( h->env, "ColorYUV", avs_new_value_array( arg_arr, 2 ), arg_name );
        FAIL_IF_ERROR( avs_is_error( res2 ), "couldn't convert range: %s\n", avs_as_error( res2 ) );
        res = update_clip( h, &vi, res2, res );
        // notification that the input range has changed to the desired one
        opt->input_range = opt->output_range;
    }
#endif

    h->func.avs_release_value( res );

    info->width   = vi->width;
    info->height  = vi->height;
    info->fps_num = vi->fps_numerator;
    info->fps_den = vi->fps_denominator;
    h->num_frames = info->num_frames = vi->num_frames;
    info->thread_safe = 1;
    if( AVS_IS_RGB64( vi ) )
        info->csp = X264_CSP_BGRA | X264_CSP_VFLIP | X264_CSP_HIGH_DEPTH;
    else if( avs_is_rgb32( vi ) )
        info->csp = X264_CSP_BGRA | X264_CSP_VFLIP;
    else if( AVS_IS_RGB48( vi ) )
        info->csp = X264_CSP_BGR | X264_CSP_VFLIP | X264_CSP_HIGH_DEPTH;
    else if( avs_is_rgb24( vi ) )
        info->csp = X264_CSP_BGR | X264_CSP_VFLIP;
    else if( AVS_IS_YUV444P16( vi ) )
        info->csp = X264_CSP_I444 | X264_CSP_HIGH_DEPTH;
    else if( avs_is_yv24( vi ) )
        info->csp = X264_CSP_I444;
    else if( AVS_IS_YUV422P16( vi ) )
        info->csp = X264_CSP_I422 | X264_CSP_HIGH_DEPTH;
    else if( avs_is_yv16( vi ) )
        info->csp = X264_CSP_I422;
    else if( AVS_IS_YUV420P16( vi ) )
        info->csp = X264_CSP_I420 | X264_CSP_HIGH_DEPTH;
    else if( avs_is_yv12( vi ) )
        info->csp = X264_CSP_I420;
    else if( AVS_IS_Y16( vi ) )
        info->csp = X264_CSP_I400 | X264_CSP_HIGH_DEPTH;
    else if( avs_is_y8( vi ) )
        info->csp = X264_CSP_I400;
    else if( avs_is_yuy2( vi ) )
        info->csp = X264_CSP_YUYV;
#if HAVE_SWSCALE
    else if( avs_is_yv411( vi ) )
        info->csp = AV_PIX_FMT_YUV411P | X264_CSP_OTHER;
#endif
    else
    {
        AVS_Value pixel_type = h->func.avs_invoke( h->env, "PixelType", res, NULL );
        const char *pixel_type_name = avs_is_string( pixel_type ) ? avs_as_string( pixel_type ) : "unknown";
        FAIL_IF_ERROR( 1, "not supported pixel type: %s\n", pixel_type_name );
    }
    info->vfr = 0;

    *p_handle = h;
    return 0;
}
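
update_clip() is called several times above but is not included in this excerpt. From the way it is used, it presumably swaps the held clip for the clip behind the newly returned AVS_Value, releases the superseded value and refreshes the cached video info; a sketch under that assumption:

/* Sketch based on the call sites above, not necessarily the verbatim helper. */
static AVS_Value update_clip( avs_hnd_t *h, const AVS_VideoInfo **vi, AVS_Value res, AVS_Value release )
{
    h->func.avs_release_clip( h->clip );
    h->clip = h->func.avs_take_clip( res, h->env );
    h->func.avs_release_value( release );
    *vi = h->func.avs_get_video_info( h->clip );
    return res;
}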