/* Initialize the depth (bit-depth conversion) filter.
 *
 * Determines whether the input colorspace needs a high-depth conversion to
 * match the encoder's requested csp, optionally overridden by a
 * "bit_depth=N" option string.  Only inserts itself into the filter chain
 * when a conversion is actually required.
 *
 * handle/filter: in/out — previous link of the filter chain; replaced with
 *                this filter when it is added.
 * info:          video properties; info->csp is updated on success.
 * param:         encoder params; param->i_csp supplies the target depth flag.
 * opt_string:    optional "bit_depth=N" option (N in [8,16]).
 * Returns 0 on success, -1 on error (allocation failure or via FAIL_IF_ERROR).
 */
static int init( hnd_t *handle, cli_vid_filter_t *filter, video_info_t *info, x264_param_t *param, char *opt_string )
{
    int ret = 0;
    /* nonzero when input csp and encoder csp differ in the HIGH_DEPTH bit */
    int change_fmt = (info->csp ^ param->i_csp) & X264_CSP_HIGH_DEPTH;
    /* flip the HIGH_DEPTH bit of the input csp iff a format change is needed
     * (identity: ~(~a ^ b) == a ^ b) */
    int csp = ~(~info->csp ^ change_fmt);
    int bit_depth = 8*x264_cli_csp_depth_factor( csp );

    if( opt_string )
    {
        static const char * const optlist[] = { "bit_depth", NULL };
        char **opts = x264_split_options( opt_string, optlist );

        if( opts )
        {
            char *str_bit_depth = x264_get_option( "bit_depth", opts );
            bit_depth = x264_otoi( str_bit_depth, -1 );

            /* out-of-range (or missing -> -1) bit depth is rejected below */
            ret = bit_depth < 8 || bit_depth > 16;
            csp = bit_depth > 8 ? csp | X264_CSP_HIGH_DEPTH : csp & ~X264_CSP_HIGH_DEPTH;
            change_fmt = (info->csp ^ csp) & X264_CSP_HIGH_DEPTH;
            free( opts );
        }
        else
            ret = 1;
    }

    /* this binary is compiled for exactly one bit depth */
    FAIL_IF_ERROR( bit_depth != BIT_DEPTH, "this filter supports only bit depth %d\n", BIT_DEPTH );
    FAIL_IF_ERROR( ret, "unsupported bit depth conversion.\n" );

    /* only add the filter to the chain if it's needed */
    if( change_fmt || bit_depth != 8 * x264_cli_csp_depth_factor( csp ) )
    {
        FAIL_IF_ERROR( !depth_filter_csp_is_supported(csp), "unsupported colorspace.\n" );
        /* error_buf (dither error diffusion) is allocated in the same block,
         * directly after the handle struct */
        depth_hnd_t *h = x264_malloc( sizeof(depth_hnd_t) + (info->width+1)*sizeof(int16_t) );

        if( !h )
            return -1;

        h->error_buf = (int16_t*)(h + 1);
        h->dst_csp = csp;
        h->bit_depth = bit_depth;
        h->prev_hnd = *handle;
        h->prev_filter = *filter;

        if( x264_cli_pic_alloc( &h->buffer, h->dst_csp, info->width, info->height ) )
        {
            x264_free( h );
            return -1;
        }

        *handle = h;
        *filter = depth_filter;
        info->csp = h->dst_csp;
    }

    return 0;
}
static int handle_opts( const char **optlist, char **opts, video_info_t *info, resizer_hnd_t *h ) { uint32_t out_sar_w, out_sar_h; char *str_width = x264_get_option( optlist[0], opts ); char *str_height = x264_get_option( optlist[1], opts ); char *str_sar = x264_get_option( optlist[2], opts ); char *fittobox = x264_get_option( optlist[3], opts ); char *str_csp = x264_get_option( optlist[4], opts ); int width = x264_otoi( str_width, -1 ); int height = x264_otoi( str_height, -1 ); int csp_only = 0; uint32_t in_sar_w = info->sar_width; uint32_t in_sar_h = info->sar_height; if( str_csp ) { /* output csp was specified, first check if optional depth was provided */ char *str_depth = strchr( str_csp, ':' ); int depth = x264_cli_csp_depth_factor( info->csp ) * 8; if( str_depth ) { /* csp bit depth was specified */ *str_depth++ = '\0'; depth = x264_otoi( str_depth, -1 ); FAIL_IF_ERROR( depth != 8 && depth != 16, "unsupported bit depth %d\n", depth ); } /* now lookup against the list of valid csps */ int csp; if( strlen( str_csp ) == 0 ) csp = info->csp & X264_CSP_MASK; else for( csp = X264_CSP_CLI_MAX-1; csp > X264_CSP_NONE; csp-- ) { if( x264_cli_csps[csp].name && !strcasecmp( x264_cli_csps[csp].name, str_csp ) ) break; } FAIL_IF_ERROR( csp == X264_CSP_NONE, "unsupported colorspace `%s'\n", str_csp ); h->dst_csp = csp; if( depth == 16 ) h->dst_csp |= X264_CSP_HIGH_DEPTH; } /* if the input sar is currently invalid, set it to 1:1 so it can be used in math */ if( !in_sar_w || !in_sar_h ) in_sar_w = in_sar_h = 1; if( str_sar ) { FAIL_IF_ERROR( 2 != sscanf( str_sar, "%u:%u", &out_sar_w, &out_sar_h ) && 2 != sscanf( str_sar, "%u/%u", &out_sar_w, &out_sar_h ), "invalid sar `%s'\n", str_sar ) } else
/* Parse the four mandatory crop options ("left","top","right","bottom")
 * into h->dims, validating that each value is present, non-negative, and a
 * multiple of the csp's modulus (height modulus is doubled for interlaced
 * content).  Returns 0 on success, -1 (via FAIL_IF_ERROR) otherwise. */
static int handle_opts( crop_hnd_t *h, video_info_t *info, char **opts, const char * const *optlist )
{
    for( int idx = 0; idx < 4; idx++ )
    {
        char *value = x264_get_option( optlist[idx], opts );
        FAIL_IF_ERROR( !value, "%s crop value not specified\n", optlist[idx] );
        h->dims[idx] = x264_otoi( value, -1 );
        FAIL_IF_ERROR( h->dims[idx] < 0, "%s crop value `%s' is less than 0\n", optlist[idx], value );
        /* odd indices are vertical crops (top/bottom), even are horizontal */
        int modulus;
        if( idx & 1 )
            modulus = h->csp->mod_height << info->interlaced;
        else
            modulus = h->csp->mod_width;
        FAIL_IF_ERROR( h->dims[idx] % modulus, "%s crop value `%s' is not a multiple of %d\n", optlist[idx], value, modulus );
    }
    return 0;
}
void xbee_ieee802154_set_csma_params_test(void* arg, struct modtest_result* result) { int ret = 0; const char buf[] = { 0x7E, 0x00, 0x04, 0x08, 0x01, 0x52, 0x4E, 0x56 }; const int count = 8; const char buf2[] = { 0x7E, 0x00, 0x04, 0x08, 0x01, 0x52, 0x52, 0x52 }; const int count2 = 8; struct sk_buff* send_buf = NULL; unsigned char* tail = NULL; struct xb_device* xbdev = NULL; struct xbee_sub_if_data *sdata = netdev_priv(xbdev->dev); xbdev = (struct xb_device*)arg; xbee_cfg802154_set_backoff_exponent(xbdev->phy, &sdata->wpan_dev, 2, 5); send_buf = alloc_skb(128, GFP_KERNEL); tail = skb_put(send_buf, count); memcpy(tail, buf, count); frameq_enqueue_send(&xbdev->send_queue, send_buf); send_buf = alloc_skb(128, GFP_KERNEL); tail = skb_put(send_buf, count); memcpy(tail, buf2, count2); frameq_enqueue_send(&xbdev->send_queue, send_buf); ret = xb_sendrecv(xbdev, xbdev->frameid); FAIL_IF_ERROR(ret); //FAIL_IF_NOT_EQ(1, skb_queue_len(&xbdev->send_queue)); //FAIL_IF_NOT_EQ(0, xbdev->recv_buf->len); // TODO inspect received data TEST_SUCCESS(); }
/* Round-trip test for the TX power accessors: a value written with
 * xb_set_tx_power() must be read back unchanged by xb_get_tx_power(). */
void xb_set_tx_power_test(void* arg, struct modtest_result* result)
{
    struct xb_device* dev = (struct xb_device*)arg;
    s32 readback = 0;
    int err = 0;

    err = xb_set_tx_power(dev, -1000);
    FAIL_IF_ERROR(err);

    err = xb_get_tx_power(dev, &readback);
    FAIL_IF_ERROR(err);
    FAIL_IF_NOT_EQ(-1000, readback);

    TEST_SUCCESS();
}
/* Round-trip test for the PAN-ID accessors: a value written with
 * xb_set_pan_id() must be read back unchanged by xb_get_pan_id(). */
void xb_set_pan_id_test(void* arg, struct modtest_result* result)
{
    struct xb_device* dev = (struct xb_device*)arg;
    __le16 readback = 0;
    int err = 0;

    err = xb_set_pan_id(dev, 0xDBCA);
    FAIL_IF_ERROR(err);

    err = xb_get_pan_id(dev, &readback);
    FAIL_IF_ERROR(err);
    FAIL_IF_NOT_EQ(0xDBCA, readback);

    TEST_SUCCESS();
}
/* Read one Y4M frame (header + planar data) from h->fh into pic.
 *
 * Validates the FRAME magic, skips any optional per-frame parameters up to
 * the terminating '\n', updates h->frame_size/h->frame_header_len (the
 * header length can vary between frames), then reads each plane.
 * Returns 0 on success, nonzero on read failure, -1 on short header read.
 */
static int read_frame_internal( cli_pic_t *pic, y4m_hnd_t *h )
{
    size_t slen = strlen( Y4M_FRAME_MAGIC );
    int i = 0;
    char header[16];

    /* Read frame header - without terminating '\n' */
    if( fread( header, 1, slen, h->fh ) != slen )
        return -1;
    header[slen] = 0;
    FAIL_IF_ERROR( strncmp( header, Y4M_FRAME_MAGIC, slen ), "bad header magic (%"PRIx32" <=> %s)\n", M32(header), header )

    /* Skip most of it */
    while( i < MAX_FRAME_HEADER && fgetc( h->fh ) != '\n' )
        i++;
    FAIL_IF_ERROR( i == MAX_FRAME_HEADER, "bad frame header!\n" )
    /* re-derive total frame size from the actual header length just seen */
    h->frame_size = h->frame_size - h->frame_header_len + i+slen+1;
    h->frame_header_len = i+slen+1;

    int error = 0;
    /* fread with nmemb==1 returns 0 or 1, so "<= 0" flags a short read */
    for( i = 0; i < pic->img.planes && !error; i++ )
        error |= fread( pic->img.plane[i], h->plane_size[i], 1, h->fh ) <= 0;
    return error;
}
/* Round-trip test for the CCA energy-detect level accessors: a value
 * written with xb_set_cca_ed_level() must be read back unchanged. */
void xb_set_cca_ed_level_test(void* arg, struct modtest_result* result)
{
    struct xb_device* dev = (struct xb_device*)arg;
    s32 readback = 0;
    int err = 0;

    err = xb_set_cca_ed_level(dev, -5000);
    FAIL_IF_ERROR(err);

    err = xb_get_cca_ed_level(dev, &readback);
    FAIL_IF_ERROR(err);
    FAIL_IF_NOT_EQ(-5000, readback);

    TEST_SUCCESS();
}
/* Round-trip test for the short-address accessors: a value written with
 * xb_set_short_addr() must be read back unchanged by xb_get_short_addr(). */
void xb_set_short_addr_test(void* arg, struct modtest_result* result)
{
    struct xb_device* dev = (struct xb_device*)arg;
    __le16 readback = 0;
    int err = 0;

    err = xb_set_short_addr(dev, 0xAAAA);
    FAIL_IF_ERROR(err);

    err = xb_get_short_addr(dev, &readback);
    FAIL_IF_ERROR(err);
    FAIL_IF_NOT_EQ(0xAAAA, readback);

    TEST_SUCCESS();
}
/* Round-trip test for the default-ACK-request flag: a value written with
 * xb_set_ackreq_default() must be read back unchanged. */
void xb_set_ackreq_default_test(void* arg, struct modtest_result* result)
{
    struct xb_device* dev = (struct xb_device*)arg;
    bool readback = 0;
    int err = 0;

    err = xb_set_ackreq_default(dev, true);
    FAIL_IF_ERROR(err);

    err = xb_get_ackreq_default(dev, &readback);
    FAIL_IF_ERROR(err);
    FAIL_IF_NOT_EQ(true, readback);

    TEST_SUCCESS();
}
/* Query the loaded AviSynth runtime for its version number.
 *
 * Returns the version as a float (e.g. 2.60f), or a negative value
 * (via FAIL_IF_ERROR) when the VersionNumber function is missing or fails.
 */
static float get_avs_version( avs_hnd_t *h )
{
/* AvxSynth has its version defined starting at 4.0, even though it's based on
   AviSynth 2.5.8. This is troublesome for get_avs_version and working around
   the new colorspaces in 2.6. So if AvxSynth is detected, explicitly define
   the version as 2.58. */
#if USE_AVXSYNTH
    return 2.58f;
#else
    FAIL_IF_ERROR( !h->func.avs_function_exists( h->env, "VersionNumber" ), "VersionNumber does not exist\n" );
    AVS_Value ver = h->func.avs_invoke( h->env, "VersionNumber", avs_new_value_array( NULL, 0 ), NULL );
    FAIL_IF_ERROR( avs_is_error( ver ), "unable to determine avisynth version: %s\n", avs_as_error( ver ) );
    FAIL_IF_ERROR( !avs_is_float( ver ), "VersionNumber did not return a float value\n" );
    float ret = avs_as_float( ver );
    /* release the AVS_Value before returning to avoid leaking it */
    h->func.avs_release_value( ver );
    return ret;
#endif
}
/* Open a raw (headerless) video file for reading.
 *
 * Resolution comes from opt->resolution or is parsed out of the file name;
 * colorspace from opt->colorspace (default I420).  For regular files the
 * frame count is derived from the file size.  "-" reads from stdin.
 * Returns 0 on success, -1 on error.
 *
 * BUG FIX: the original allocated the handle first and leaked it on every
 * error path (FAIL_IF_ERROR returns and the fopen failure).  All fallible
 * validation now happens before allocation, and the fopen failure path
 * frees the handle.
 */
static int open_file( char *psz_filename, hnd_t *p_handle, video_info_t *info, cli_input_opt_t *opt )
{
    if( !opt->resolution )
    {
        /* try to parse the file name */
        for( char *p = psz_filename; *p; p++ )
            if( *p >= '0' && *p <= '9' && sscanf( p, "%ux%u", &info->width, &info->height ) == 2 )
                break;
    }
    else
        sscanf( opt->resolution, "%ux%u", &info->width, &info->height );
    FAIL_IF_ERROR( !info->width || !info->height, "raw input requires a resolution.\n" )

    if( opt->colorspace )
    {
        for( info->csp = X264_CSP_CLI_MAX-1; x264_cli_csps[info->csp].name && strcasecmp( x264_cli_csps[info->csp].name, opt->colorspace ); )
            info->csp--;
        FAIL_IF_ERROR( info->csp == X264_CSP_NONE, "unsupported colorspace `%s'\n", opt->colorspace );
    }
    else /* default */
        info->csp = X264_CSP_I420;

    raw_hnd_t *h = (raw_hnd_t*)malloc( sizeof(raw_hnd_t) );
    if( !h )
        return -1;

    h->next_frame = 0;
    info->vfr = 0;

    if( !strcmp( psz_filename, "-" ) )
        h->fh = stdin;
    else
        h->fh = fopen( psz_filename, "rb" );
    if( h->fh == NULL )
    {
        free( h ); /* don't leak the handle when the file can't be opened */
        return -1;
    }

    info->thread_safe = 1;
    info->num_frames = 0;
    h->frame_size = 0;
    for( int i = 0; i < x264_cli_csps[info->csp].planes; i++ )
    {
        h->plane_size[i] = x264_cli_pic_plane_size( info->csp, info->width, info->height, i );
        h->frame_size += h->plane_size[i];
    }

    if( x264_is_regular_file( h->fh ) )
    {
        fseek( h->fh, 0, SEEK_END );
        uint64_t size = ftell( h->fh );
        fseek( h->fh, 0, SEEK_SET );
        info->num_frames = size / h->frame_size;
    }

    *p_handle = h;
    return 0;
}
/* Round-trip test for the channel accessors: page/channel written with
 * xb_set_channel() must be read back unchanged by xb_get_channel(). */
void xb_set_channel_test(void* arg, struct modtest_result* result)
{
    struct xb_device* dev = (struct xb_device*)arg;
    u8 page_out = 0;
    u8 channel_out = 0;
    int err = 0;

    err = xb_set_channel(dev, 0, 20);
    FAIL_IF_ERROR(err);

    err = xb_get_channel(dev, &page_out, &channel_out);
    FAIL_IF_ERROR(err);
    FAIL_IF_NOT_EQ(0, page_out);
    FAIL_IF_NOT_EQ(20, channel_out);

    TEST_SUCCESS();
}
/* Copy one CLI picture into another of identical csp/dimensions.
 * Copies metadata (duration, pts, opaque) and every plane's pixel data,
 * honoring each picture's own stride.  Returns 0 on success, -1 (via
 * FAIL_IF_ERROR) on invalid or mismatched frame properties. */
int x264_cli_pic_copy( cli_pic_t *out, cli_pic_t *in )
{
    int csp = in->img.csp & X264_CSP_MASK;
    FAIL_IF_ERROR( x264_cli_csp_is_invalid( in->img.csp ), "invalid colorspace arg %d\n", in->img.csp )
    FAIL_IF_ERROR( in->img.csp != out->img.csp || in->img.height != out->img.height || in->img.width != out->img.width, "incompatible frame properties\n" );

    /* copy metadata */
    out->duration = in->duration;
    out->pts = in->pts;
    out->opaque = in->opaque;

    /* copy pixel data plane by plane, scaling dimensions by the csp's
     * per-plane width/height factors */
    for( int p = 0; p < out->img.planes; p++ )
    {
        int plane_h = in->img.height * x264_cli_csps[csp].height[p];
        int plane_w = in->img.width * x264_cli_csps[csp].width[p];
        x264_cli_plane_copy( out->img.plane[p], out->img.stride[p], in->img.plane[p], in->img.stride[p], plane_w, plane_h );
    }
    return 0;
}
/* Round-trip test for the backoff-exponent accessors: min/max BE written
 * with xb_set_backoff_exponent() must be read back unchanged.
 *
 * BUG FIX: the original read the values back with xb_get_channel() (a
 * copy-paste from the channel test), which returns page/channel, not the
 * backoff exponents; use xb_get_backoff_exponent() instead. */
void xb_set_backoff_exponent_test(void* arg, struct modtest_result* result)
{
    int ret = 0;
    struct xb_device* xbdev = (struct xb_device*)arg;
    u8 min_be = 0;
    u8 max_be = 0;

    ret = xb_set_backoff_exponent(xbdev, 0, 20);
    FAIL_IF_ERROR(ret);

    ret = xb_get_backoff_exponent(xbdev, &min_be, &max_be);
    FAIL_IF_ERROR(ret);
    FAIL_IF_NOT_EQ(0, min_be);
    FAIL_IF_NOT_EQ(20, max_be);

    TEST_SUCCESS();
}
/* Initialize the crop filter from the "left,top,right,bottom" option string.
 *
 * dims[0..1] hold the left/top offsets; dims[2..3] are converted from
 * right/bottom crop amounts into the output width/height.  The filter is
 * only inserted into the chain when the output size actually differs.
 * Returns 0 on success, -1 on error.
 *
 * BUG FIX: the original leaked the handle when x264_split_options failed
 * or when handle_opts rejected the options; both paths now free it.
 */
static int init( hnd_t *handle, cli_vid_filter_t *filter, video_info_t *info, x264_param_t *param, char *opt_string )
{
    FAIL_IF_ERROR( x264_cli_csp_is_invalid( info->csp ), "invalid csp %d\n", info->csp );
    crop_hnd_t *h = calloc( 1, sizeof(crop_hnd_t) );
    if( !h )
        return -1;

    h->csp = x264_cli_get_csp( info->csp );
    static const char * const optlist[] = { "left", "top", "right", "bottom", NULL };
    char **opts = x264_split_options( opt_string, optlist );
    if( !opts )
    {
        free( h ); /* don't leak the handle on option-parse failure */
        return -1;
    }

    int err = handle_opts( h, info, opts, optlist );
    free( opts );
    if( err )
    {
        free( h ); /* don't leak the handle when options are invalid */
        return -1;
    }

    /* convert right/bottom crop amounts into output width/height */
    h->dims[2] = info->width - h->dims[0] - h->dims[2];
    h->dims[3] = info->height - h->dims[1] - h->dims[3];
    FAIL_IF_ERROR( h->dims[2] <= 0 || h->dims[3] <= 0, "invalid output resolution %dx%d\n", h->dims[2], h->dims[3] );

    if( info->width != h->dims[2] || info->height != h->dims[3] )
        x264_cli_log( NAME, X264_LOG_INFO, "cropping to %dx%d\n", h->dims[2], h->dims[3] );
    else
    {
        /* do nothing as the user supplied 0s for all the values */
        free( h );
        return 0;
    }

    /* done initializing, overwrite values */
    info->width = h->dims[2];
    info->height = h->dims[3];
    h->prev_filter = *filter;
    h->prev_hnd = *handle;
    *handle = h;
    *filter = crop_filter;
    return 0;
}
/* Test that xb_get_extended_addr() returns the device's known 64-bit
 * serial number (xbee_serialno). */
void xb_get_extended_addr_test(void* arg, struct modtest_result* result)
{
    struct xb_device* dev = (struct xb_device*)arg;
    __le64 addr = 0;
    int err = 0;

    err = xb_get_extended_addr(dev, &addr);
    FAIL_IF_ERROR(err);
    FAIL_IF_NOT_EQ(xbee_serialno, addr);

    TEST_SUCCESS();
}
/* Fetch frame i_frame from the AviSynth clip and expose its plane pointers
 * directly in pic (zero-copy; the AVS_VideoFrame is stashed in pic->opaque
 * so it can be released later).  Returns 0 on success, -1 on error. */
static int read_frame( cli_pic_t *pic, hnd_t handle, int i_frame )
{
    static const int plane[3] = { AVS_PLANAR_Y, AVS_PLANAR_U, AVS_PLANAR_V };
    avs_hnd_t *h = handle;
    if( i_frame >= h->num_frames )
        return -1;
    /* keep the frame handle alive in pic->opaque for the caller to release */
    AVS_VideoFrame *frm = pic->opaque = h->func.avs_get_frame( h->clip, i_frame );
    const char *err = h->func.avs_clip_get_error( h->clip );
    FAIL_IF_ERROR( err, "%s occurred while reading frame %d\n", err, i_frame );
    for( int i = 0; i < pic->img.planes; i++ )
    {
        /* explicitly cast away the const attribute to avoid a warning */
        pic->img.plane[i] = (uint8_t*)avs_get_read_ptr_p( frm, plane[i] );
        pic->img.stride[i] = avs_get_pitch_p( frm, plane[i] );
    }
    return 0;
}
/* Example/unit test for buffer_escape(): escapes the first 5 bytes of a
 * frame in place and compares the result against the known-good escaped
 * form (0x11 -> 0x7D 0x31 per the XBee API escaping rules, judging by the
 * fixture data).
 * NOTE(review): the function name has a typo ("exampe"); it is kept because
 * renaming would break whatever registers this test by name. */
void buffer_escape_exampe(void* arg, struct modtest_result* result)
{
    /* unescaped input (extra trailing byte leaves room for expansion) */
    char buf[] = { 0x7E, 0x00, 0x02, 0x23, 0x11, 0xCB, 0x00 };
    /* expected escaped output */
    const char escbuf[] = { 0x7E, 0x00, 0x02, 0x23, 0x7D, 0x31, 0xCB };
    size_t esclen = 0;
    int err;
    //pr_debug("bufsize %lu", sizeof(buf) );
    esclen = buffer_escaped_len(buf, 5);
    err = buffer_escape(buf, 5, esclen);
    print_hex_dump_bytes("buf: ", DUMP_PREFIX_NONE, buf, 6);
    print_hex_dump_bytes("esc: ", DUMP_PREFIX_NONE, escbuf, 6);
    FAIL_IF_ERROR(err);
    FAIL_IF_NOT_EQ(0, memcmp(escbuf, buf, esclen) );
    TEST_SUCCESS();
}
/* Read one Y4M frame (header + planar data) from h->fh into pic,
 * supporting high-bit-depth input.
 *
 * Validates the FRAME magic, skips optional per-frame parameters up to the
 * terminating '\n', updates h->frame_size/h->frame_header_len, then reads
 * each plane (pixel_depth bytes per sample).  Planes whose stored bit depth
 * is not a multiple of 8... actually not 16 (h->bit_depth & 7 nonzero,
 * i.e. 9-15 bit) are shifted up to the full 16-bit range in place.
 * Returns 0 on success, nonzero on a short plane read, -1 on a short header.
 */
static int read_frame_internal( cli_pic_t *pic, y4m_hnd_t *h )
{
    size_t slen = strlen( Y4M_FRAME_MAGIC );
    int pixel_depth = x264_cli_csp_depth_factor( pic->img.csp );
    int i = 0;
    char header[16];

    /* Read frame header - without terminating '\n' */
    if( fread( header, 1, slen, h->fh ) != slen )
        return -1;
    header[slen] = 0;
    FAIL_IF_ERROR( strncmp( header, Y4M_FRAME_MAGIC, slen ), "bad header magic (%"PRIx32" <=> %s)\n", M32(header), header )

    /* Skip most of it */
    while( i < MAX_FRAME_HEADER && fgetc( h->fh ) != '\n' )
        i++;
    FAIL_IF_ERROR( i == MAX_FRAME_HEADER, "bad frame header!\n" )
    /* re-derive total frame size from the actual header length just seen */
    h->frame_size = h->frame_size - h->frame_header_len + i+slen+1;
    h->frame_header_len = i+slen+1;

    int error = 0;
    for( i = 0; i < pic->img.planes && !error; i++ )
    {
        error |= fread( pic->img.plane[i], pixel_depth, h->plane_size[i], h->fh ) != h->plane_size[i];
        if( h->bit_depth & 7 )
        {
            /* upconvert non 16bit high depth planes to 16bit using the same
             * algorithm as used in the depth filter. */
            uint16_t *plane = (uint16_t*)pic->img.plane[i];
            uint64_t pixel_count = h->plane_size[i];
            int lshift = 16 - h->bit_depth;
            for( uint64_t j = 0; j < pixel_count; j++ )
                plane[j] = plane[j] << lshift;
        }
    }
    return error;
}
/* Read one Y4M frame from h->fh into pic.
 *
 * Parses/validates the per-frame header (FRAME magic plus optional
 * parameters, terminated by '\n'), updates h->frame_size and
 * h->frame_header_len, then delegates plane reading (including any bit
 * depth handling) to read_picture_with_correct_bit_depth().
 * Returns that helper's result; -1 on a short header read.
 */
static int read_frame_internal( cli_pic_t *pic, y4m_hnd_t *h )
{
    size_t slen = strlen( Y4M_FRAME_MAGIC );
    int i = 0;
    char header[16];

    /* Read frame header - without terminating '\n' */
    if( fread( header, 1, slen, h->fh ) != slen )
        return -1;
    header[slen] = 0;
    FAIL_IF_ERROR( strncmp( header, Y4M_FRAME_MAGIC, slen ), "bad header magic (%"PRIx32" <=> %s)\n", M32(header), header )

    /* Skip most of it */
    while( i < MAX_FRAME_HEADER && fgetc( h->fh ) != '\n' )
        i++;
    FAIL_IF_ERROR( i == MAX_FRAME_HEADER, "bad frame header!\n" )
    /* re-derive total frame size from the actual header length just seen */
    h->frame_size = h->frame_size - h->frame_header_len + i+slen+1;
    h->frame_header_len = i+slen+1;

    return read_picture_with_correct_bit_depth( pic, h->handler );
}
static int handle_opts( const char **optlist, char **opts, video_info_t *info, resizer_hnd_t *h ) { uint32_t out_sar_w, out_sar_h; char *str_width = x264_get_option( optlist[0], opts ); char *str_height = x264_get_option( optlist[1], opts ); char *str_sar = x264_get_option( optlist[2], opts ); char *fittobox = x264_get_option( optlist[3], opts ); char *str_csp = x264_get_option( optlist[4], opts ); int width = x264_otoi( str_width, -1 ); int height = x264_otoi( str_height, -1 ); int csp_only = 0; uint32_t in_sar_w = info->sar_width; uint32_t in_sar_h = info->sar_height; if( str_csp ) { /* output csp was specified, lookup against valid values */ int csp; for( csp = X264_CSP_CLI_MAX-1; x264_cli_csps[csp].name && strcasecmp( x264_cli_csps[csp].name, str_csp ); ) csp--; FAIL_IF_ERROR( csp == X264_CSP_NONE, "unsupported colorspace `%s'\n", str_csp ); h->dst_csp = csp; } /* if the input sar is currently invalid, set it to 1:1 so it can be used in math */ if( !in_sar_w || !in_sar_h ) in_sar_w = in_sar_h = 1; if( str_sar ) { FAIL_IF_ERROR( 2 != sscanf( str_sar, "%u:%u", &out_sar_w, &out_sar_h ) && 2 != sscanf( str_sar, "%u/%u", &out_sar_w, &out_sar_h ), "invalid sar `%s'\n", str_sar ) } else
/* Open a video source through AviSynth.
 *
 * For ".avs" scripts the script is Imported (wrapping it in Distributor()
 * when a multi-threaded GetMTMode is detected); for other extensions a list
 * of known source filters is tried in sequence.  Field-based clips are
 * woven into frames.  Without libswscale, colorspace and TV/PC range
 * conversions are performed inside AviSynth.  Finally the clip's properties
 * are translated into *info and the handle stored in *p_handle.
 * Returns 0 on success, -1 on any failure (mostly via FAIL_IF_ERROR).
 */
static int open_file( char *psz_filename, hnd_t *p_handle, video_info_t *info, cli_input_opt_t *opt )
{
    /* AviSynth cannot stream; require a seekable regular file */
    FILE *fh = x264_fopen( psz_filename, "r" );
    if( !fh )
        return -1;
    int b_regular = x264_is_regular_file( fh );
    fclose( fh );
    FAIL_IF_ERROR( !b_regular, "AVS input is incompatible with non-regular file `%s'\n", psz_filename );

    avs_hnd_t *h = calloc( 1, sizeof(avs_hnd_t) );
    if( !h )
        return -1;
    FAIL_IF_ERROR( custom_avs_load_library( h ), "failed to load avisynth\n" );

    h->env = h->func.avs_create_script_environment( AVS_INTERFACE_25 );
    if( h->func.avs_get_error )
    {
        const char *error = h->func.avs_get_error( h->env );
        FAIL_IF_ERROR( error, "%s\n", error );
    }

    float avs_version = get_avs_version( h );
    if( avs_version <= 0 )
        return -1;
    x264_cli_log( "avs", X264_LOG_DEBUG, "using avisynth version %.2f\n", avs_version );

#ifdef _WIN32
    /* Avisynth doesn't support Unicode filenames. */
    char ansi_filename[MAX_PATH];
    FAIL_IF_ERROR( !x264_ansi_filename( psz_filename, ansi_filename, MAX_PATH, 0 ), "invalid ansi filename\n" );
    AVS_Value arg = avs_new_value_string( ansi_filename );
#else
    AVS_Value arg = avs_new_value_string( psz_filename );
#endif

    AVS_Value res;
    char *filename_ext = get_filename_extension( psz_filename );

    if( !strcasecmp( filename_ext, "avs" ) )
    {
        res = h->func.avs_invoke( h->env, "Import", arg, NULL );
        FAIL_IF_ERROR( avs_is_error( res ), "%s\n", avs_as_error( res ) );
        /* check if the user is using a multi-threaded script and apply distributor if necessary.
           adapted from avisynth's vfw interface */
        AVS_Value mt_test = h->func.avs_invoke( h->env, "GetMTMode", avs_new_value_bool( 0 ), NULL );
        int mt_mode = avs_is_int( mt_test ) ? avs_as_int( mt_test ) : 0;
        h->func.avs_release_value( mt_test );
        if( mt_mode > 0 && mt_mode < 5 )
        {
            AVS_Value temp = h->func.avs_invoke( h->env, "Distributor", res, NULL );
            h->func.avs_release_value( res );
            res = temp;
        }
    }
    else /* non script file */
    {
        /* cycle through known source filters to find one that works */
        const char *filter[AVS_MAX_SEQUENCE+1] = { 0 };
        avs_build_filter_sequence( filename_ext, filter );
        int i;
        for( i = 0; filter[i]; i++ )
        {
            x264_cli_log( "avs", X264_LOG_INFO, "trying %s... ", filter[i] );
            if( !h->func.avs_function_exists( h->env, filter[i] ) )
            {
                x264_cli_printf( X264_LOG_INFO, "not found\n" );
                continue;
            }
            if( !strncasecmp( filter[i], "FFmpegSource", 12 ) )
            {
                x264_cli_printf( X264_LOG_INFO, "indexing... " );
                fflush( stderr );
            }
            res = h->func.avs_invoke( h->env, filter[i], arg, NULL );
            if( !avs_is_error( res ) )
            {
                x264_cli_printf( X264_LOG_INFO, "succeeded\n" );
                break;
            }
            x264_cli_printf( X264_LOG_INFO, "failed\n" );
        }
        FAIL_IF_ERROR( !filter[i], "unable to find source filter to open `%s'\n", psz_filename );
    }

    FAIL_IF_ERROR( !avs_is_clip( res ), "`%s' didn't return a video clip\n", psz_filename );
    h->clip = h->func.avs_take_clip( res, h->env );
    const AVS_VideoInfo *vi = h->func.avs_get_video_info( h->clip );
    FAIL_IF_ERROR( !avs_has_video( vi ), "`%s' has no video data\n", psz_filename );

    /* if the clip is made of fields instead of frames, call weave to make them frames */
    if( avs_is_field_based( vi ) )
    {
        x264_cli_log( "avs", X264_LOG_WARNING, "detected fieldbased (separated) input, weaving to frames\n" );
        AVS_Value tmp = h->func.avs_invoke( h->env, "Weave", res, NULL );
        FAIL_IF_ERROR( avs_is_error( tmp ), "couldn't weave fields into frames: %s\n", avs_as_error( tmp ) );
        res = update_clip( h, &vi, tmp, res );
        info->interlaced = 1;
        info->tff = avs_is_tff( vi );
    }

#if !HAVE_SWSCALE
    /* if swscale is not available, convert the CSP if necessary */
    FAIL_IF_ERROR( avs_version < 2.6f && (opt->output_csp == X264_CSP_I400 || opt->output_csp == X264_CSP_I422 || opt->output_csp == X264_CSP_I444),
                   "avisynth >= 2.6 is required for i400/i422/i444 output\n" );
    if( (opt->output_csp == X264_CSP_I400 && !AVS_IS_Y( vi )) || (opt->output_csp == X264_CSP_I420 && !AVS_IS_420( vi )) ||
        (opt->output_csp == X264_CSP_I422 && !AVS_IS_422( vi )) || (opt->output_csp == X264_CSP_I444 && !AVS_IS_444( vi )) ||
        (opt->output_csp == X264_CSP_RGB && !avs_is_rgb( vi )) )
    {
        /* AviSynth+ and classic AviSynth use different colorspace names */
        const char *csp;
        if( AVS_IS_AVISYNTHPLUS )
        {
            csp = opt->output_csp == X264_CSP_I400 ? "Y" :
                  opt->output_csp == X264_CSP_I420 ? "YUV420" :
                  opt->output_csp == X264_CSP_I422 ? "YUV422" :
                  opt->output_csp == X264_CSP_I444 ? "YUV444" :
                  "RGB";
        }
        else
        {
            csp = opt->output_csp == X264_CSP_I400 ? "Y8" :
                  opt->output_csp == X264_CSP_I420 ? "YV12" :
                  opt->output_csp == X264_CSP_I422 ? "YV16" :
                  opt->output_csp == X264_CSP_I444 ? "YV24" :
                  "RGB";
        }
        x264_cli_log( "avs", X264_LOG_WARNING, "converting input clip to %s\n", csp );
        if( opt->output_csp != X264_CSP_I400 )
        {
            FAIL_IF_ERROR( opt->output_csp < X264_CSP_I444 && (vi->width&1),
                           "input clip width not divisible by 2 (%dx%d)\n", vi->width, vi->height );
            FAIL_IF_ERROR( opt->output_csp == X264_CSP_I420 && info->interlaced && (vi->height&3),
                           "input clip height not divisible by 4 (%dx%d)\n", vi->width, vi->height );
            FAIL_IF_ERROR( (opt->output_csp == X264_CSP_I420 || info->interlaced) && (vi->height&1),
                           "input clip height not divisible by 2 (%dx%d)\n", vi->width, vi->height );
        }
        char conv_func[16];
        snprintf( conv_func, sizeof(conv_func), "ConvertTo%s", csp );
        AVS_Value arg_arr[3];
        const char *arg_name[3];
        int arg_count = 1;
        arg_arr[0] = res;
        arg_name[0] = NULL;
        if( opt->output_csp != X264_CSP_I400 )
        {
            arg_arr[arg_count] = avs_new_value_bool( info->interlaced );
            arg_name[arg_count] = "interlaced";
            arg_count++;
        }
        /* if doing a rgb <-> yuv conversion then range is handled via 'matrix'.
           though it's only supported in 2.56+ */
        char matrix[7];
        if( avs_version >= 2.56f && ((opt->output_csp == X264_CSP_RGB && avs_is_yuv( vi )) || (opt->output_csp != X264_CSP_RGB && avs_is_rgb( vi ))) )
        {
            // if converting from yuv, then we specify the matrix for the input, otherwise use the output's.
            int use_pc_matrix = avs_is_yuv( vi ) ? opt->input_range == RANGE_PC : opt->output_range == RANGE_PC;
            snprintf( matrix, sizeof(matrix), "%s601", use_pc_matrix ? "PC." : "Rec" ); /* FIXME: use correct coefficients */
            arg_arr[arg_count] = avs_new_value_string( matrix );
            arg_name[arg_count] = "matrix";
            arg_count++;
            // notification that the input range has changed to the desired one
            opt->input_range = opt->output_range;
        }
        AVS_Value res2 = h->func.avs_invoke( h->env, conv_func, avs_new_value_array( arg_arr, arg_count ), arg_name );
        FAIL_IF_ERROR( avs_is_error( res2 ), "couldn't convert input clip to %s: %s\n", csp, avs_as_error( res2 ) );
        res = update_clip( h, &vi, res2, res );
    }
    /* if swscale is not available, change the range if necessary. This only applies to YUV-based CSPs however */
    if( avs_is_yuv( vi ) && opt->output_range != RANGE_AUTO && ((opt->input_range == RANGE_PC) != opt->output_range) )
    {
        const char *levels = opt->output_range ? "TV->PC" : "PC->TV";
        x264_cli_log( "avs", X264_LOG_WARNING, "performing %s conversion\n", levels );
        AVS_Value arg_arr[2];
        arg_arr[0] = res;
        arg_arr[1] = avs_new_value_string( levels );
        const char *arg_name[] = { NULL, "levels" };
        AVS_Value res2 = h->func.avs_invoke( h->env, "ColorYUV", avs_new_value_array( arg_arr, 2 ), arg_name );
        FAIL_IF_ERROR( avs_is_error( res2 ), "couldn't convert range: %s\n", avs_as_error( res2 ) );
        res = update_clip( h, &vi, res2, res );
        // notification that the input range has changed to the desired one
        opt->input_range = opt->output_range;
    }
#endif

    h->func.avs_release_value( res );

    /* publish clip properties to the caller */
    info->width = vi->width;
    info->height = vi->height;
    info->fps_num = vi->fps_numerator;
    info->fps_den = vi->fps_denominator;
    h->num_frames = info->num_frames = vi->num_frames;
    info->thread_safe = 1;
    /* map the AviSynth pixel format onto an x264 CLI csp */
    if( AVS_IS_RGB64( vi ) )
        info->csp = X264_CSP_BGRA | X264_CSP_VFLIP | X264_CSP_HIGH_DEPTH;
    else if( avs_is_rgb32( vi ) )
        info->csp = X264_CSP_BGRA | X264_CSP_VFLIP;
    else if( AVS_IS_RGB48( vi ) )
        info->csp = X264_CSP_BGR | X264_CSP_VFLIP | X264_CSP_HIGH_DEPTH;
    else if( avs_is_rgb24( vi ) )
        info->csp = X264_CSP_BGR | X264_CSP_VFLIP;
    else if( AVS_IS_YUV444P16( vi ) )
        info->csp = X264_CSP_I444 | X264_CSP_HIGH_DEPTH;
    else if( avs_is_yv24( vi ) )
        info->csp = X264_CSP_I444;
    else if( AVS_IS_YUV422P16( vi ) )
        info->csp = X264_CSP_I422 | X264_CSP_HIGH_DEPTH;
    else if( avs_is_yv16( vi ) )
        info->csp = X264_CSP_I422;
    else if( AVS_IS_YUV420P16( vi ) )
        info->csp = X264_CSP_I420 | X264_CSP_HIGH_DEPTH;
    else if( avs_is_yv12( vi ) )
        info->csp = X264_CSP_I420;
    else if( AVS_IS_Y16( vi ) )
        info->csp = X264_CSP_I400 | X264_CSP_HIGH_DEPTH;
    else if( avs_is_y8( vi ) )
        info->csp = X264_CSP_I400;
    else if( avs_is_yuy2( vi ) )
        info->csp = X264_CSP_YUYV;
#if HAVE_SWSCALE
    else if( avs_is_yv411( vi ) )
        info->csp = AV_PIX_FMT_YUV411P | X264_CSP_OTHER;
#endif
    else
    {
        AVS_Value pixel_type = h->func.avs_invoke( h->env, "PixelType", res, NULL );
        const char *pixel_type_name = avs_is_string( pixel_type ) ? avs_as_string( pixel_type ) : "unknown";
        FAIL_IF_ERROR( 1, "not supported pixel type: %s\n", pixel_type_name );
    }
    info->vfr = 0;

    *p_handle = h;
    return 0;
}
/* Open a video source through VapourSynth (vsscript).
 *
 * Evaluates the script file, grabs output node 0, verifies the clip is a
 * constant-format YUV clip, and translates its properties (size, fps,
 * frame count, subsampling -> csp, bit depth) into *info.  A semaphore
 * sized to the VS core's thread count is created for request throttling.
 * Returns 0 on success, -1 on error (each failure path unwinds the
 * vsscript/node state it has acquired so far).
 */
static int open_file( char *psz_filename, hnd_t *p_handle, video_info_t *info, cli_input_opt_t *opt )
{
    vs_hnd_t *h = new vs_hnd_t;
    if( !h )
        return -1;
    /* VapourSynth scripts must be regular files */
    FILE *fh = x264_fopen(psz_filename, "rb");
    if (!fh)
        return -1;
    int b_regular = x264_is_regular_file(fh);
    fclose(fh);
    FAIL_IF_ERROR(!b_regular, "VS input is incompatible with non-regular file `%s'\n", psz_filename);
    FAIL_IF_ERROR(!vsscript_init(), "Failed to initialize VapourSynth environment\n");
    h->vsapi = vsscript_getVSApi();
    if (!h->vsapi)
    {
        fprintf(stderr, "Failed to get VapourSynth API pointer\n");
        vsscript_finalize();
        return -1;
    }
    // Should always succeed
    if (vsscript_createScript(&h->se))
    {
        fprintf(stderr, "Script environment initialization failed:\n%s\n", vsscript_getError(h->se));
        vsscript_freeScript(h->se);
        vsscript_finalize();
        return -1;
    }
    /* convert the filename to the platform's native wide/narrow form */
    std::string strfilename = psz_filename;
    nstring scriptFilename = s2ws(strfilename);
    if (vsscript_evaluateFile(&h->se, nstringToUtf8(scriptFilename).c_str(), efSetWorkingDir))
    {
        fprintf(stderr, "Script evaluation failed:\n%s\n", vsscript_getError(h->se));
        vsscript_freeScript(h->se);
        vsscript_finalize();
        return -1;
    }
    h->node = vsscript_getOutput(h->se, 0);//outputIndex
    if (!h->node)
    {
        fprintf(stderr, "Failed to retrieve output node. Invalid index specified?\n");
        vsscript_freeScript(h->se);
        vsscript_finalize();
        return -1;
    }
    /* throttle concurrent frame requests to the core's thread count */
    const VSCoreInfo *vsInfo = h->vsapi->getCoreInfo(vsscript_getCore(h->se));
    h->sea = new semaphore(vsInfo->numThreads);
    const VSVideoInfo *vi = h->vsapi->getVideoInfo(h->node);
    if (vi->format->colorFamily != cmYUV)
    {
        fprintf(stderr, "Can only read YUV format clips");
        h->vsapi->freeNode(h->node);
        vsscript_freeScript(h->se);
        vsscript_finalize();
        return -1;
    }
    if (!isConstantFormat(vi))
    {
        fprintf(stderr, "Cannot output clips with varying dimensions\n");
        h->vsapi->freeNode(h->node);
        vsscript_freeScript(h->se);
        vsscript_finalize();
        return -1;
    }
    info->width = vi->width;
    info->height = vi->height;
    info->fps_num = vi->fpsNum;
    info->fps_den = vi->fpsDen;
    info->thread_safe = 1;
    info->num_frames = vi->numFrames;
    /* map subsampling factors onto x264 CLI colorspaces */
    if (vi->format->subSamplingW == 1 && vi->format->subSamplingH == 1)
        info->csp = X264_CSP_I420;
    else if (vi->format->subSamplingW == 1 && vi->format->subSamplingH == 0)
        info->csp = X264_CSP_I422;
    else if (vi->format->subSamplingW == 0 && vi->format->subSamplingH == 0)
        info->csp = X264_CSP_I444;
    h->bit_depth = vi->format->bitsPerSample;
    if (h->bit_depth > 8)
    {
        info->csp |= X264_CSP_HIGH_DEPTH;
    }
    *p_handle = (void*)h;
    return 0;
}
/* Initialize the select-every filter from an option string of the form
 * "step,offset0[,offset1...]".
 *
 * Keeps only the frames at the given offsets within each step-sized group.
 * Inserts a cache filter sized to the maximum backward seek the pattern
 * requires, then rescales frame count / fps / timebase accordingly.
 * Returns 0 on success, -1 on error.
 */
static int init( hnd_t *handle, cli_vid_filter_t *filter, video_info_t *info, x264_param_t *param, char *opt_string )
{
    selvry_hnd_t *h = malloc( sizeof(selvry_hnd_t) );
    if( !h )
        return -1;
    h->pattern_len = 0;
    h->step_size = 0;
    int offsets[MAX_PATTERN_SIZE];
    /* first token is the step size (p non-NULL only on the first iteration
     * of strtok); all following tokens are offsets within the step */
    for( char *tok, *p = opt_string; (tok = strtok( p, "," )); p = NULL )
    {
        int val = x264_otoi( tok, -1 );
        if( p )
        {
            FAIL_IF_ERROR( val <= 0, "invalid step `%s'\n", tok )
            h->step_size = val;
            continue;
        }
        FAIL_IF_ERROR( val < 0 || val >= h->step_size, "invalid offset `%s'\n", tok )
        FAIL_IF_ERROR( h->pattern_len >= MAX_PATTERN_SIZE, "max pattern size %d reached\n", MAX_PATTERN_SIZE )
        offsets[h->pattern_len++] = val;
    }
    FAIL_IF_ERROR( !h->step_size, "no step size provided\n" )
    FAIL_IF_ERROR( !h->pattern_len, "no offsets supplied\n" )

    h->pattern = malloc( h->pattern_len * sizeof(int) );
    if( !h->pattern )
        return -1;
    memcpy( h->pattern, offsets, h->pattern_len * sizeof(int) );

    /* determine required cache size to maintain pattern. */
    intptr_t max_rewind = 0;
    int min = h->step_size;
    for( int i = h->pattern_len-1; i >= 0; i-- )
    {
        min = X264_MIN( min, offsets[i] );
        if( i )
            max_rewind = X264_MAX( max_rewind, offsets[i-1] - min + 1 );
        /* reached maximum rewind size */
        if( max_rewind == h->step_size )
            break;
    }
    if( x264_init_vid_filter( "cache", handle, filter, info, param, (void*)max_rewind ) )
        return -1;

    /* done initing, overwrite properties */
    if( h->step_size != h->pattern_len )
    {
        /* only pattern_len of every step_size frames survive */
        info->num_frames = (uint64_t)info->num_frames * h->pattern_len / h->step_size;
        info->fps_den *= h->step_size;
        info->fps_num *= h->pattern_len;
        x264_reduce_fraction( &info->fps_num, &info->fps_den );
        if( info->vfr )
        {
            info->timebase_den *= h->pattern_len;
            info->timebase_num *= h->step_size;
            x264_reduce_fraction( &info->timebase_num, &info->timebase_den );
        }
    }

    h->pts = 0;
    h->vfr = info->vfr;
    h->prev_filter = *filter;
    h->prev_hnd = *handle;
    *filter = select_every_filter;
    *handle = h;
    return 0;
}
/*!
******************************************************************************
 @Function	SGXUT_query_sgx_corerev

 @Description	Get the core revision and DDK build from the ukernel.
		Queries the host driver DDK version, then the microkernel's
		core revision / DDK version / build options, decoding and
		printing each via DPF.  Uses FAIL_IF_ERROR on query failures.

 @Input		*psSGXDevData : device data for SGX-type device

 @Return	void
******************************************************************************/
static IMG_VOID SGXUT_query_sgx_corerev(PVRSRV_DEV_DATA *psSGXDevData)
{
    SGX_MISC_INFO sSGXMiscInfo;
    PVRSRV_ERROR eResult;
    IMG_UINT32 ui32CoreRev, ui32CoreId, ui32CoreIdSW, ui32CoreRevSW;
    IMG_UINT32 ui32DDKVersion, ui32DDKBuild, ui32BuildOptions;
    IMG_CHAR acValues[4], acSGXCoreNamedId[16];

    /*
     * Get the host driver DDK version (c.f. ukernel DDK version below)
     */
    DPF("Misc Info API: Query the SGX features from host driver\n");
    sSGXMiscInfo.eRequest = SGX_MISC_INFO_REQUEST_DRIVER_SGXREV;
    eResult = SGXGetMiscInfo(psSGXDevData, &sSGXMiscInfo);
    FAIL_IF_ERROR(eResult);
    ui32DDKVersion = sSGXMiscInfo.uData.sSGXFeatures.ui32DDKVersion;
    ui32DDKBuild = sSGXMiscInfo.uData.sSGXFeatures.ui32DDKBuild;
    /* version dword decodes into per-byte fields */
    SGXUT_decode_dword(ui32DDKVersion, acValues);
    DPF(".... SGX Host driver DDK version: %d.%d.%d.%d\n",
        acValues[2], acValues[1], acValues[0], ui32DDKBuild);
#if defined(PVRSRV_USSE_EDM_STATUS_DEBUG)
    DPF(".... SGX Microkernel status buffer location: CPUVAddr: 0x%08X, DevVAddr: 0x%08X\n",
        (IMG_UINTPTR_T)sSGXMiscInfo.uData.sSGXFeatures.pvEDMStatusBuffer,
        sSGXMiscInfo.uData.sSGXFeatures.sDevVAEDMStatusBuffer.uiAddr);
#endif

    /*
     * Query: request SGX core revision, DDK version and build options
     * from the microkernel
     */
    DPF("Misc Info API: Query the SGX features from microkernel\n");
    sSGXMiscInfo.eRequest = SGX_MISC_INFO_REQUEST_SGXREV;
    /* Submit the query */
    eResult = SGXGetMiscInfo(psSGXDevData, &sSGXMiscInfo);
    FAIL_IF_ERROR(eResult);
    ui32CoreRev = sSGXMiscInfo.uData.sSGXFeatures.ui32CoreRev;
    ui32CoreId = sSGXMiscInfo.uData.sSGXFeatures.ui32CoreID >> 16; /* ID lives in the top half */
    ui32DDKVersion = sSGXMiscInfo.uData.sSGXFeatures.ui32DDKVersion;
    ui32DDKBuild = sSGXMiscInfo.uData.sSGXFeatures.ui32DDKBuild;
    ui32CoreIdSW = sSGXMiscInfo.uData.sSGXFeatures.ui32CoreIdSW;
    ui32CoreRevSW = sSGXMiscInfo.uData.sSGXFeatures.ui32CoreRevSW;
    ui32BuildOptions = sSGXMiscInfo.uData.sSGXFeatures.ui32BuildOptions;

    /* Decode and print output for HW data (from registers) */
    SGXUT_decode_dword(ui32CoreRev, acValues);
    DPF(".... Hardware core designer: %d, HW core revision: %d.%d.%d\n",
        acValues[3], acValues[2], acValues[1], acValues[0]);

    /* Identify SGX product from core ID */
    switch (ui32CoreId)
    {
        case 0x0112U: sprintf(acSGXCoreNamedId, "SGX 520/530"); break;
        case 0x0113U: sprintf(acSGXCoreNamedId, "SGX 535"); break;
        case 0x0114U: sprintf(acSGXCoreNamedId, "SGX 540"); break;
        case 0x0115U: sprintf(acSGXCoreNamedId, "SGX 545"); break;
        case 0x0116U: sprintf(acSGXCoreNamedId, "SGX 531"); break;
        case 0x0118U: sprintf(acSGXCoreNamedId, "SGX 54x-MP"); break;
        default: strcpy(acSGXCoreNamedId, "");
    }
    if( strcmp(acSGXCoreNamedId, "") != 0)
    {
        DPF(".... Hardware core ID: %x, name: %s\n", ui32CoreId, acSGXCoreNamedId);
    }
    else
    {
        DPF(".... Hardware core ID: %x\n", ui32CoreId);
    }

    /* Decode and print output for software data (from SGX microkernel) */
    SGXUT_decode_dword(ui32DDKVersion, acValues);
    DPF(".... SGX microkernel DDK version: %d.%d.%d.%d\n",
        acValues[2], acValues[1], acValues[0], ui32DDKBuild);
    DPF(".... SGX microkernel software core ID: SGX %d, revision: %x\n",
        ui32CoreIdSW, ui32CoreRevSW);

    /* Commonly used debug options (from ukernel) */
    DPF("SGX microkernel build options\n");
    DPF(".... DEBUG: ");
    SGXUT_decode_build_option(ui32BuildOptions, DEBUG_SET_OFFSET);
    DPF(".... PDUMP: ");
    SGXUT_decode_build_option(ui32BuildOptions, PDUMP_SET_OFFSET);
    DPF(".... PVRSRV_USSE_EDM_STATUS_DEBUG: ");
    SGXUT_decode_build_option(ui32BuildOptions, PVRSRV_USSE_EDM_STATUS_DEBUG_SET_OFFSET);
    DPF(".... SUPPORT_HW_RECOVERY: ");
    SGXUT_decode_build_option(ui32BuildOptions, SUPPORT_HW_RECOVERY_SET_OFFSET);

    /* Other options in no particular order */
    DPF(".... PVR_SECURE_HANDLES: ");
    SGXUT_decode_build_option(ui32BuildOptions, PVR_SECURE_HANDLES_SET_OFFSET);
    DPF(".... SGX_BYPASS_SYSTEM_CACHE: ");
    SGXUT_decode_build_option(ui32BuildOptions, SGX_BYPASS_SYSTEM_CACHE_SET_OFFSET);
    DPF(".... SGX_DMS_AGE_ENABLE: ");
    SGXUT_decode_build_option(ui32BuildOptions, SGX_DMS_AGE_ENABLE_SET_OFFSET);
    DPF(".... SGX_FAST_DPM_INIT: ");
    SGXUT_decode_build_option(ui32BuildOptions, SGX_FAST_DPM_INIT_SET_OFFSET);
    DPF(".... SGX_FEATURE_WRITEBACK_DCU: ");
    SGXUT_decode_build_option(ui32BuildOptions, SGX_FEATURE_DCU_SET_OFFSET);
    DPF(".... SGX_FEATURE_MP: ");
    SGXUT_decode_build_option(ui32BuildOptions, SGX_FEATURE_MP_SET_OFFSET);
    DPF(".... SGX_FEATURE_MP_CORE_COUNT: ");
    /* core count is stored biased by one */
    DPF("%d\n", ((ui32BuildOptions >> SGX_FEATURE_MP_CORE_COUNT_SET_OFFSET) & SGX_FEATURE_MP_CORE_COUNT_SET_MASK) +1);
    DPF(".... SGX_FEATURE_MULTITHREADED_UKERNEL: ");
    SGXUT_decode_build_option(ui32BuildOptions, SGX_FEATURE_MULTITHREADED_UKERNEL_SET_OFFSET);
    DPF(".... SGX_FEATURE_OVERLAPPED_SPM: ");
    SGXUT_decode_build_option(ui32BuildOptions, SGX_FEATURE_OVERLAPPED_SPM_SET_OFFSET);
    DPF(".... SGX_FEATURE_SYSTEM_CACHE: ");
    SGXUT_decode_build_option(ui32BuildOptions, SGX_FEATURE_SYSTEM_CACHE_SET_OFFSET);
    DPF(".... SGX_SUPPORT_HWPROFILING: ");
    SGXUT_decode_build_option(ui32BuildOptions, SGX_SUPPORT_HWPROFILING_SET_OFFSET);
    DPF(".... SUPPORT_ACTIVE_POWER_MANAGEMENT: ");
    SGXUT_decode_build_option(ui32BuildOptions, SUPPORT_ACTIVE_POWER_MANAGEMENT_SET_OFFSET);
    DPF(".... SUPPORT_DISPLAYCONTROLLER_TILING: ");
    SGXUT_decode_build_option(ui32BuildOptions, SUPPORT_DISPLAYCONTROLLER_TILING_SET_OFFSET);
    DPF(".... SUPPORT_PERCONTEXT_PB: ");
    SGXUT_decode_build_option(ui32BuildOptions, SUPPORT_PERCONTEXT_PB_SET_OFFSET);
    DPF(".... SUPPORT_SGX_HWPERF: ");
    SGXUT_decode_build_option(ui32BuildOptions, SUPPORT_SGX_HWPERF_SET_OFFSET);
    DPF(".... SUPPORT_SGX_MMU_DUMMY_PAGE: ");
    SGXUT_decode_build_option(ui32BuildOptions, SUPPORT_SGX_MMU_DUMMY_PAGE_SET_OFFSET);
    DPF(".... SUPPORT_SGX_PRIORITY_SCHEDULING: ");
    SGXUT_decode_build_option(ui32BuildOptions, SUPPORT_SGX_PRIORITY_SCHEDULING_SET_OFFSET);
    DPF(".... USE_SUPPORT_NO_TA3D_OVERLAP: ");
    SGXUT_decode_build_option(ui32BuildOptions, USE_SUPPORT_NO_TA3D_OVERLAP_SET_OFFSET);
}
/* Services unit-test entry point: connects to PVR services, enumerates and
 * acquires devices, exercises the Display Class API against the first display
 * device, maps the primary surface to SGX, queries the SGX core revision,
 * then tears everything down.  Repeats the whole sequence frameStop times.
 * Returns 0 on success; the FAIL_IF_* macros handle error exits. */
int main(int argc, char ** argv)
#endif
{
	PVRSRV_ERROR eResult;
	PVRSRV_CONNECTION *psConnection;
	IMG_UINT32 uiNumDevices;
	PVRSRV_DEVICE_IDENTIFIER asDevID[MAX_NUM_DEVICE_IDS];
	PVRSRV_DEV_DATA asDevData[MAX_NUM_DEVICE_IDS];
	PVRSRV_DEV_DATA *ps3DDevData = IMG_NULL;
	IMG_UINT32 i;
	PVRSRV_SGX_CLIENT_INFO sSGXInfo;
	IMG_INT loop = 0;       /* completed iterations of the whole test */
	IMG_INT frameStop = 1;  /* total iterations to run (argv[1] on non-Linux) */
#ifdef __linux__
	IMG_INT c;
#endif

	/* Display class API */
	IMG_UINT32 ui32Count;
	IMG_UINT32 *pui32DeviceID;
	DISPLAY_INFO sDisplayInfo;
	DISPLAY_FORMAT *psPrimFormat;
	DISPLAY_DIMS *psPrimDims;
#if defined(SUPPORT_PVRSRV_GET_DC_SYSTEM_BUFFER)
	PVRSRV_CLIENT_MEM_INFO *psSGXSystemBufferMemInfo;
#endif
#if defined (SUPPORT_SID_INTERFACE)
	IMG_SID hDevMemContext;
#else
	IMG_HANDLE hDevMemContext;
#endif
	IMG_UINT32 ui32SharedHeapCount;
	PVRSRV_HEAP_INFO asHeapInfo[PVRSRV_MAX_CLIENT_HEAPS];

	/* may want to define a structure to hang this lot off */
	IMG_HANDLE hDisplayDevice;
#if defined(SUPPORT_PVRSRV_GET_DC_SYSTEM_BUFFER)
#if defined (SUPPORT_SID_INTERFACE)
	IMG_SID hSystemBuffer = 0;
#else
	IMG_HANDLE hSystemBuffer = IMG_NULL;
#endif
#endif

#ifdef __linux__
#define OPTS "q"
	/* On Linux only 'q' (quick) is accepted; anything else aborts. */
	while ((c = getopt (argc, argv, OPTS)) != -1)
	{
		switch (c)
		{
			case 'q':
			{
				break;
			}
			default:
			{
				DPF ("Illegal option %c.\n"
					"Valid options are "OPTS" (quick)\n", c);
				exit (EXIT_FAILURE);
			}
		}
	}
#else
	/* Non-Linux: optional argv[1] is the number of test iterations. */
	if(argc >= 2)
	{
		frameStop = atol(argv[1]);
	}
#endif

start_again:
	/* Capacity of asDevID/asDevData handed to the enumeration call. */
	uiNumDevices = 10;

	DPF("----------------------- Start -----------------------\n");

	/* Negative test: a NULL connection pointer must be rejected. */
	DPF("Try calling PVRSRVConnect with an invalid argument:\n");
	eResult = PVRSRVConnect(NULL, 0);
	FAIL_IF_NO_ERROR(eResult);

	DPF("Call PVRSRVConnect with a valid argument:\n");
	eResult = PVRSRVConnect(&psConnection, 0);
	FAIL_IF_ERROR(eResult);

	/* Negative test: NULL puiNumDevices must be rejected. */
	DPF("Try calling PVRSRVEnumerateDevices with invalid puiNumDevices:\n");
	eResult = PVRSRVEnumerateDevices(psConnection, NULL, NULL);
	FAIL_IF_NO_ERROR(eResult);

	DPF("Get number of devices from PVRSRVEnumerateDevices:\n");
	eResult = PVRSRVEnumerateDevices(psConnection, &uiNumDevices, asDevID);
	FAIL_IF_ERROR(eResult);

	DPF(".... Reported %u devices\n", (IMG_UINT) uiNumDevices);

	/* List the devices */
	DPF(".... Device Number | Device Type\n");
	for (i = 0; i < uiNumDevices; i++)
	{
		DPF(" %04d | ", (IMG_INT)asDevID[i].ui32DeviceIndex);
		print_dev_type(asDevID[i].eDeviceType);
		DPF("\n");
	}

	/* Get each device... */
	for (i = 0; i < uiNumDevices; i++)
	{
		/* Only get services managed devices. Display Class API handles
		   external display devices */
		if (asDevID[i].eDeviceType != PVRSRV_DEVICE_TYPE_EXT)
		{
			PVRSRV_DEV_DATA *psDevData = asDevData + i;

			DPF("Attempt to acquire device %d:\n",(IMG_UINT) asDevID[i].ui32DeviceIndex);
			eResult = PVRSRVAcquireDeviceData ( psConnection,
								asDevID[i].ui32DeviceIndex,
								psDevData,
								PVRSRV_DEVICE_TYPE_UNKNOWN);
			FAIL_IF_ERROR(eResult);

			/* Print out details about the SGX device.
			   At the enumeration stage you should get back the device info
			   from which you match a devicetype with index, i.e. we should
			   know what index SGX device is and test for it now. */
			if (asDevID[i].eDeviceType == PVRSRV_DEVICE_TYPE_SGX)
			{
				/* save off 3d devdata */
				ps3DDevData = psDevData;

				DPF("Getting SGX Client info\n");
				eResult = SGXGetClientInfo(psDevData, &sSGXInfo);
				FAIL_IF_ERROR(eResult);

				print_sgx_info(&sSGXInfo);
			}
		}
	}

	/* The rest of the test needs an SGX device. */
	if(ps3DDevData == IMG_NULL)
	{
		eResult = PVRSRV_ERROR_NO_DEVICEDATA_FOUND;
		/* PRQA S 3201,3355,3358 1 */ /* ignore warning about unreachable code */
		FAIL_IF_ERROR(eResult);
	}

	DPF("Display Class API: enumerate devices\n");

	/* Count the display devices. */
	eResult = PVRSRVEnumerateDeviceClass(psConnection,
						PVRSRV_DEVICE_CLASS_DISPLAY,
						&ui32Count,
						NULL);
	FAIL_IF_ERROR(eResult);

	DPF("PVRSRVEnumerateDeviceClass() returns %u display device(s)\n", ui32Count);
	if(ui32Count == 0)
	{
		eResult = PVRSRV_ERROR_NO_DC_DEVICES_FOUND;
		/* PRQA S 3201,3355,3358 1 */ /* ignore warning about unreachable code */
		FAIL_IF_ERROR(eResult);
	}

	/* Get the device ids for the devices. */
	pui32DeviceID = malloc(sizeof(*pui32DeviceID) * ui32Count);
	CHECK_MEM_ALLOC(pui32DeviceID);

	eResult = PVRSRVEnumerateDeviceClass(psConnection,
						PVRSRV_DEVICE_CLASS_DISPLAY,
						&ui32Count,
						pui32DeviceID);
	FAIL_IF_ERROR(eResult);

	DPF("Attempt to create memory context for SGX:\n");
	eResult = PVRSRVCreateDeviceMemContext(ps3DDevData,
						&hDevMemContext,
						&ui32SharedHeapCount,
						asHeapInfo);
	FAIL_IF_ERROR(eResult);

	DPF("Display Class API: open device\n");

	/* Pick the first (current) display device. */
	hDisplayDevice = PVRSRVOpenDCDevice(ps3DDevData,
						*pui32DeviceID);
	if (hDisplayDevice == NULL)
	{
		eResult = PVRSRV_ERROR_UNABLE_TO_OPEN_DC_DEVICE;
	}
	FAIL_IF_ERROR(eResult);

	free(pui32DeviceID);

	DPF("Display Class API: Get display info\n");
	eResult = PVRSRVGetDCInfo(hDisplayDevice,
					&sDisplayInfo);
	FAIL_IF_ERROR(eResult);

	DPF(".... Name:%s\n", sDisplayInfo.szDisplayName);
	DPF(".... MaxSwapChains:%u\n", sDisplayInfo.ui32MaxSwapChains);
	DPF(".... MaxSwapChainBuffers:%u\n", sDisplayInfo.ui32MaxSwapChainBuffers);
	DPF(".... MinSwapInterval:%u\n", sDisplayInfo.ui32MinSwapInterval);
	DPF(".... MaxSwapInterval:%u\n", sDisplayInfo.ui32MaxSwapInterval);

	DPF("Display Class API: enumerate display formats\n");

	/* Get number of primary pixel formats. */
	eResult = PVRSRVEnumDCFormats(hDisplayDevice,
					&ui32Count,
					NULL);
	FAIL_IF_ERROR(eResult);

	psPrimFormat = malloc(sizeof(*psPrimFormat) * ui32Count);
	CHECK_MEM_ALLOC(psPrimFormat);

	/* Get all primary pixel formats. */
	eResult = PVRSRVEnumDCFormats(hDisplayDevice,
					&ui32Count,
					psPrimFormat);
	FAIL_IF_ERROR(eResult);

	for (i = 0; i < ui32Count; i++)
	{
		DPF(".... Display format %u - Pixelformat:%u\n",
			i, psPrimFormat[i].pixelformat);
	}

	DPF("Display Class API: enumerate display dimensions\n");

	/* Get number dimensions for the current pixel format. */
	eResult = PVRSRVEnumDCDims(hDisplayDevice,
					&ui32Count,
					psPrimFormat,
					NULL);
	FAIL_IF_ERROR(eResult);

	psPrimDims = malloc(sizeof(*psPrimDims) * ui32Count);
	CHECK_MEM_ALLOC(psPrimDims);

	/* Get all dimension info for the current pixel format. */
	eResult = PVRSRVEnumDCDims(hDisplayDevice,
					&ui32Count,
					psPrimFormat,
					psPrimDims);
	FAIL_IF_ERROR(eResult);

	for (i = 0; i < ui32Count; i++)
	{
		DPF(".... Display dimensions %u - ByteStride:%u Width:%u Height:%u\n",
			i,
			psPrimDims[i].ui32ByteStride,
			psPrimDims[i].ui32Width,
			psPrimDims[i].ui32Height);
	}

	free(psPrimFormat);
	free(psPrimDims);

#if defined(SUPPORT_PVRSRV_GET_DC_SYSTEM_BUFFER)
	DPF("Display Class API: get the system (primary) buffer\n");

	/* Get a handle to the primary surface in the system. */
	eResult = PVRSRVGetDCSystemBuffer(hDisplayDevice,
						&hSystemBuffer);
	FAIL_IF_ERROR(eResult);
#endif

	/* Dump the shared heaps returned by CreateDeviceMemContext above. */
	for (i = 0; i < ui32SharedHeapCount; i++)
	{
		DPF(".... Shared heap %u - HeapID:0x%x DevVAddr:0x%x "
			"Size:0x%x Attr:0x%x\n",
			i,
			asHeapInfo[i].ui32HeapID,
			asHeapInfo[i].sDevVAddrBase.uiAddr,
			asHeapInfo[i].ui32HeapByteSize,
			asHeapInfo[i].ui32Attribs);
	}

	DPF("Display Class API: map display surface to SGX\n");
#if defined(SUPPORT_PVRSRV_GET_DC_SYSTEM_BUFFER)
	eResult = PVRSRVMapDeviceClassMemory(ps3DDevData,
						hDevMemContext,
						hSystemBuffer,
						&psSGXSystemBufferMemInfo);
	FAIL_IF_ERROR(eResult);
#endif

	SGXUT_query_sgx_corerev(ps3DDevData);

#if defined(SUPPORT_SGX_EDM_MEMORY_DEBUG)
	SGXUT_readmem(ps3DDevData, asHeapInfo, ui32SharedHeapCount, hDevMemContext);
#endif

#if defined(SUPPORT_PVRSRV_GET_DC_SYSTEM_BUFFER)
	DPF("Display Class API: unmap display surface from SGX\n");
	eResult = PVRSRVUnmapDeviceClassMemory(ps3DDevData,
						psSGXSystemBufferMemInfo);
	FAIL_IF_ERROR(eResult);
#endif

	DPF("Attempt to destroy memory context for SGX:\n");
	eResult = PVRSRVDestroyDeviceMemContext(ps3DDevData,
						hDevMemContext);
	FAIL_IF_ERROR(eResult);

	DPF("Display Class API: close the device\n");
	eResult = PVRSRVCloseDCDevice(psConnection, hDisplayDevice);
	FAIL_IF_ERROR(eResult);

	/* Release client info for every SGX device we acquired. */
	for (i = 0; i < uiNumDevices; i++)
	{
		if (asDevID[i].eDeviceType != PVRSRV_DEVICE_TYPE_EXT)
		{
			PVRSRV_DEV_DATA *psDevData = asDevData + i;

			if (asDevID[i].eDeviceType == PVRSRV_DEVICE_TYPE_SGX)
			{
				DPF("SGXReleaseClientInfo:\n");
				eResult = SGXReleaseClientInfo(psDevData, &sSGXInfo);
				FAIL_IF_ERROR(eResult);
			}
		}
	}

	DPF("PVRSRVDisconnect:\n");
	eResult = PVRSRVDisconnect(psConnection);
	FAIL_IF_ERROR(eResult);

	DPF("---------------------End loop %d---------------------\n", ++loop);
	/* Re-run the whole sequence until the requested iteration count. */
	if (loop < frameStop)
	{
		goto start_again;
	}

	return 0;
}
/* Open a YUV4MPEG2 file/stream, parse its stream header and fill *info.
 * "-" reads from stdin.  On success, ownership of the allocated handle is
 * transferred to the caller via *p_handle.  Returns 0 on success, -1 on
 * failure (allocation, open, or header-parse error). */
static int open_file( char *psz_filename, hnd_t *p_handle, video_info_t *info, cli_input_opt_t *opt )
{
    y4m_hnd_t *h = malloc( sizeof(y4m_hnd_t) );
    int i;
    uint32_t n, d;
    char header[MAX_YUV4_HEADER+10];
    char *tokend, *header_end;
    int colorspace = X264_CSP_NONE;
    int alt_colorspace = X264_CSP_NONE;
    int alt_bit_depth = 8;
    if( !h )
        return -1;

    h->next_frame = 0;
    info->vfr = 0;

    if( !strcmp( psz_filename, "-" ) )
        h->fh = stdin;
    else
        /* FIX: use x264_fopen like the other input modules for consistency */
        h->fh = x264_fopen( psz_filename, "rb" );
    if( h->fh == NULL )
    {
        free( h ); /* FIX: don't leak the handle when the file can't be opened */
        return -1;
    }

    h->frame_header_len = strlen( Y4M_FRAME_MAGIC )+1;

    /* Read header (a single line terminated by '\n') */
    for( i = 0; i < MAX_YUV4_HEADER; i++ )
    {
        int c = fgetc( h->fh );
        if( c == EOF )
        {
            /* FIX: truncated stream -- force the failure path below instead
             * of filling the buffer with EOF bytes */
            i = MAX_YUV4_HEADER;
            break;
        }
        header[i] = c;
        if( header[i] == '\n' )
        {
            /* Add a space after last option. Makes parsing "444" vs
               "444alpha" easier. */
            header[i+1] = 0x20;
            header[i+2] = 0;
            break;
        }
    }
    if( i == MAX_YUV4_HEADER || strncmp( header, Y4M_MAGIC, strlen( Y4M_MAGIC ) ) )
    {
        /* FIX: release the stream and handle on a malformed header */
        if( h->fh != stdin )
            fclose( h->fh );
        free( h );
        return -1;
    }

    /* Scan properties */
    header_end = &header[i+1]; /* Include space */
    h->seq_header_len = i+1;
    for( char *tokstart = &header[strlen( Y4M_MAGIC )+1]; tokstart < header_end; tokstart++ )
    {
        if( *tokstart == 0x20 )
            continue;
        switch( *tokstart++ )
        {
            case 'W': /* Width. Required. */
                info->width = strtol( tokstart, &tokend, 10 );
                tokstart=tokend;
                break;
            case 'H': /* Height. Required. */
                info->height = strtol( tokstart, &tokend, 10 );
                tokstart=tokend;
                break;
            case 'C': /* Color space */
                colorspace = parse_csp_and_depth( tokstart, &h->bit_depth );
                tokstart = strchr( tokstart, 0x20 );
                break;
            case 'I': /* Interlace type */
                switch( *tokstart++ )
                {
                    case 't':
                        info->interlaced = 1;
                        info->tff = 1;
                        break;
                    case 'b':
                        info->interlaced = 1;
                        info->tff = 0;
                        break;
                    case 'm':
                        info->interlaced = 1;
                        break;
                    //case '?':
                    //case 'p':
                    default:
                        break;
                }
                break;
            case 'F': /* Frame rate - 0:0 if unknown */
                if( sscanf( tokstart, "%u:%u", &n, &d ) == 2 && n && d )
                {
                    x264_reduce_fraction( &n, &d );
                    info->fps_num = n;
                    info->fps_den = d;
                }
                tokstart = strchr( tokstart, 0x20 );
                break;
            case 'A': /* Pixel aspect - 0:0 if unknown */
                /* Don't override the aspect ratio if sar has been explicitly
                   set on the commandline. */
                if( sscanf( tokstart, "%u:%u", &n, &d ) == 2 && n && d )
                {
                    x264_reduce_fraction( &n, &d );
                    info->sar_width  = n;
                    info->sar_height = d;
                }
                tokstart = strchr( tokstart, 0x20 );
                break;
            case 'X': /* Vendor extensions */
                if( !strncmp( "YSCSS=", tokstart, 6 ) )
                {
                    /* Older nonstandard pixel format representation */
                    tokstart += 6;
                    alt_colorspace = parse_csp_and_depth( tokstart, &alt_bit_depth );
                }
                tokstart = strchr( tokstart, 0x20 );
                break;
        }
    }

    /* Fall back to the nonstandard YSCSS colorspace if 'C' was absent */
    if( colorspace == X264_CSP_NONE )
    {
        colorspace   = alt_colorspace;
        h->bit_depth = alt_bit_depth;
    }

    // default to 8bit 4:2:0 if nothing is specified
    if( colorspace == X264_CSP_NONE )
    {
        colorspace   = X264_CSP_I420;
        h->bit_depth = 8;
    }

    /* NOTE(review): failures past this point still leak h/h->fh because
     * FAIL_IF_ERROR returns directly -- matches the original behavior. */
    FAIL_IF_ERROR( colorspace <= X264_CSP_NONE || colorspace >= X264_CSP_MAX, "colorspace unhandled\n" )
    FAIL_IF_ERROR( h->bit_depth < 8 || h->bit_depth > 16, "unsupported bit depth `%d'\n", h->bit_depth );

    info->thread_safe = 1;
    info->num_frames  = 0;
    info->csp         = colorspace;
    h->frame_size     = h->frame_header_len;

    if( h->bit_depth > 8 )
        info->csp |= X264_CSP_HIGH_DEPTH;

    const x264_cli_csp_t *csp = x264_cli_get_csp( info->csp );

    for( i = 0; i < csp->planes; i++ )
    {
        h->plane_size[i] = x264_cli_pic_plane_size( info->csp, info->width, info->height, i );
        h->frame_size += h->plane_size[i];
        /* x264_cli_pic_plane_size returns the size in bytes, we need the value in pixels from here on */
        h->plane_size[i] /= x264_cli_csp_depth_factor( info->csp );
    }

    /* Most common case: frame_header = "FRAME" */
    if( x264_is_regular_file( h->fh ) )
    {
        uint64_t init_pos = ftell( h->fh );
        fseek( h->fh, 0, SEEK_END );
        uint64_t i_size = ftell( h->fh );
        fseek( h->fh, init_pos, SEEK_SET );
        info->num_frames = (i_size - h->seq_header_len) / h->frame_size;
    }

    *p_handle = h;
    return 0;
}
/* Open a raw (headerless) YUV file.  Resolution comes from --input-res or is
 * sniffed from the filename; colorspace/bit depth come from the CLI options
 * (defaulting to 8-bit i420).  "-" reads from stdin.  On success, ownership
 * of the handle transfers to the caller via *p_handle.  Returns 0 on
 * success, -1 on failure. */
static int open_file( char *psz_filename, hnd_t *p_handle, video_info_t *info, cli_input_opt_t *opt )
{
    /* calloc: frame_size and use_mmap must start at zero */
    raw_hnd_t *h = calloc( 1, sizeof(raw_hnd_t) );
    if( !h )
        return -1;

    if( !opt->resolution )
    {
        /* try to parse the file name */
        for( char *p = psz_filename; *p; p++ )
            if( *p >= '0' && *p <= '9' && sscanf( p, "%dx%d", &info->width, &info->height ) == 2 )
                break;
    }
    else
        sscanf( opt->resolution, "%dx%d", &info->width, &info->height );
    /* NOTE(review): FAIL_IF_ERROR returns directly, so h leaks on these
     * early validation failures -- matches the original behavior. */
    FAIL_IF_ERROR( !info->width || !info->height, "raw input requires a resolution.\n" )
    if( opt->colorspace )
    {
        /* Look the name up against the CLI colorspace table */
        for( info->csp = X264_CSP_CLI_MAX-1; info->csp > X264_CSP_NONE; info->csp-- )
        {
            if( x264_cli_csps[info->csp].name && !strcasecmp( x264_cli_csps[info->csp].name, opt->colorspace ) )
                break;
        }
        FAIL_IF_ERROR( info->csp == X264_CSP_NONE, "unsupported colorspace `%s'\n", opt->colorspace );
    }
    else /* default */
        info->csp = X264_CSP_I420;

    h->bit_depth = opt->bit_depth;
    FAIL_IF_ERROR( h->bit_depth < 8 || h->bit_depth > 16, "unsupported bit depth `%d'\n", h->bit_depth );
    if( h->bit_depth > 8 )
        info->csp |= X264_CSP_HIGH_DEPTH;

    if( !strcmp( psz_filename, "-" ) )
        h->fh = stdin;
    else
        h->fh = x264_fopen( psz_filename, "rb" );
    if( h->fh == NULL )
    {
        free( h ); /* FIX: don't leak the handle when the file can't be opened */
        return -1;
    }

    info->thread_safe = 1;
    info->num_frames  = 0;
    info->vfr         = 0;

    const x264_cli_csp_t *csp = x264_cli_get_csp( info->csp );
    for( int i = 0; i < csp->planes; i++ )
    {
        h->plane_size[i] = x264_cli_pic_plane_size( info->csp, info->width, info->height, i );
        h->frame_size += h->plane_size[i];
        /* x264_cli_pic_plane_size returns the size in bytes, we need the value in pixels from here on */
        h->plane_size[i] /= x264_cli_csp_depth_factor( info->csp );
    }

    if( x264_is_regular_file( h->fh ) )
    {
        fseek( h->fh, 0, SEEK_END );
        uint64_t size = ftell( h->fh );
        fseek( h->fh, 0, SEEK_SET );
        info->num_frames = size / h->frame_size;

        /* Attempt to use memory-mapped input frames if possible */
        if( !(h->bit_depth & 7) )
            h->use_mmap = !x264_cli_mmap_init( &h->mmap, h->fh );
    }

    *p_handle = h;
    return 0;
}
static int parse_tcfile( FILE *tcfile_in, timecode_hnd_t *h, video_info_t *info ) { char buff[256]; int ret, tcfv, num, seq_num, timecodes_num; double *timecodes = NULL; double *fpss = NULL; ret = fscanf( tcfile_in, "# timecode format v%d", &tcfv ); FAIL_IF_ERROR( ret != 1 || (tcfv != 1 && tcfv != 2), "unsupported timecode format\n" ) if( tcfv == 1 ) { uint64_t file_pos; double assume_fps, seq_fps; int start, end; int prev_start = -1, prev_end = -1; h->assume_fps = 0; for( num = 2; fgets( buff, sizeof(buff), tcfile_in ) != NULL; num++ ) { if( buff[0] == '#' || buff[0] == '\n' || buff[0] == '\r' ) continue; FAIL_IF_ERROR( sscanf( buff, "assume %lf", &h->assume_fps ) != 1 && sscanf( buff, "Assume %lf", &h->assume_fps ) != 1, "tcfile parsing error: assumed fps not found\n" ) break; } FAIL_IF_ERROR( h->assume_fps <= 0, "invalid assumed fps %.6f\n", h->assume_fps ) file_pos = ftell( tcfile_in ); h->stored_pts_num = 0; for( seq_num = 0; fgets( buff, sizeof(buff), tcfile_in ) != NULL; num++ ) { if( buff[0] == '#' || buff[0] == '\n' || buff[0] == '\r' ) { if( sscanf( buff, "# TDecimate Mode 3: Last Frame = %d", &end ) == 1 ) h->stored_pts_num = end + 1; continue; } ret = sscanf( buff, "%d,%d,%lf", &start, &end, &seq_fps ); FAIL_IF_ERROR( ret != 3 && ret != EOF, "invalid input tcfile\n" ) FAIL_IF_ERROR( start > end || start <= prev_start || end <= prev_end || seq_fps <= 0, "invalid input tcfile at line %d: %s\n", num, buff ) prev_start = start; prev_end = end; if( h->auto_timebase_den || h->auto_timebase_num ) ++seq_num; } if( !h->stored_pts_num ) h->stored_pts_num = end + 1; timecodes_num = h->stored_pts_num; fseek( tcfile_in, file_pos, SEEK_SET ); timecodes = malloc( timecodes_num * sizeof(double) ); if( !timecodes ) return -1; if( h->auto_timebase_den || h->auto_timebase_num ) { fpss = malloc( (seq_num + 1) * sizeof(double) ); if( !fpss ) goto fail; } assume_fps = correct_fps( h->assume_fps, h ); if( assume_fps < 0 ) goto fail; timecodes[0] = 0; for( num = seq_num = 0; 
num < timecodes_num - 1; ) { fgets( buff, sizeof(buff), tcfile_in ); if( buff[0] == '#' || buff[0] == '\n' || buff[0] == '\r' ) continue; ret = sscanf( buff, "%d,%d,%lf", &start, &end, &seq_fps ); if( ret != 3 ) start = end = timecodes_num - 1; for( ; num < start && num < timecodes_num - 1; num++ ) timecodes[num + 1] = timecodes[num] + 1 / assume_fps; if( num < timecodes_num - 1 ) { if( h->auto_timebase_den || h->auto_timebase_num ) fpss[seq_num++] = seq_fps; seq_fps = correct_fps( seq_fps, h ); if( seq_fps < 0 ) goto fail; for( num = start; num <= end && num < timecodes_num - 1; num++ ) timecodes[num + 1] = timecodes[num] + 1 / seq_fps; } } if( h->auto_timebase_den || h->auto_timebase_num ) fpss[seq_num] = h->assume_fps; if( h->auto_timebase_num && !h->auto_timebase_den ) { double exponent; double assume_fps_sig, seq_fps_sig; if( try_mkv_timebase_den( fpss, h, seq_num + 1 ) < 0 ) goto fail; fseek( tcfile_in, file_pos, SEEK_SET ); assume_fps_sig = sigexp10( h->assume_fps, &exponent ); assume_fps = MKV_TIMEBASE_DEN / ( round( MKV_TIMEBASE_DEN / assume_fps_sig ) / exponent ); for( num = 0; num < timecodes_num - 1; ) { fgets( buff, sizeof(buff), tcfile_in ); if( buff[0] == '#' || buff[0] == '\n' || buff[0] == '\r' ) continue; ret = sscanf( buff, "%d,%d,%lf", &start, &end, &seq_fps ); if( ret != 3 ) start = end = timecodes_num - 1; seq_fps_sig = sigexp10( seq_fps, &exponent ); seq_fps = MKV_TIMEBASE_DEN / ( round( MKV_TIMEBASE_DEN / seq_fps_sig ) / exponent ); for( ; num < start && num < timecodes_num - 1; num++ ) timecodes[num + 1] = timecodes[num] + 1 / assume_fps; for( num = start; num <= end && num < timecodes_num - 1; num++ ) timecodes[num + 1] = timecodes[num] + 1 / seq_fps; } } if( fpss ) free( fpss ); h->assume_fps = assume_fps; h->last_timecode = timecodes[timecodes_num - 1]; }