void Init_openvxruby()
{
    context = vxCreateContext();
    vx_status status = vxQueryContext(context, VX_QUERY_CONTEXT_IMPLEMENTATION,
                                      implementation, sizeof(implementation));
    if (status != VX_SUCCESS)
    {
        REXT_PRINT("context = "VX_FMT_REF" status = %d\n", context, status);
        strncpy(implementation, "khronos.sample (bad)", sizeof(implementation));
    }
    rb_mOpenVXString = rb_str_new2(implementation);
    rubyext_modules(Qnil, modules, dimof(modules));
    // there is no "unload"
}
int main(void)
{
    vx_context context = vxCreateContext();
    vx_uint8 value = 8;
    vx_graph graph = vxCreateGraph(context);
    vx_image images[] = {
        vxCreateUniformImage(context, 640, 480, VX_DF_IMAGE_U8, &value),
        vxCreateImage(context, 640, 480, VX_DF_IMAGE_U8),
    };
    vx_image intermediate_images[] = {
        vxCreateVirtualImage(graph, 0, 0, VX_DF_IMAGE_S16),
    };
    /* Create the depth conversion nodes. The shift scalars must be of
     * VX_TYPE_INT32, so declare the backing variables as vx_int32. */
    vx_int32 shift_value2 = 0;
    vx_scalar vx_value2 = vxCreateScalar(context, VX_TYPE_INT32, &shift_value2);
    vx_int32 shift_value1 = 0;
    vx_scalar vx_value1 = vxCreateScalar(context, VX_TYPE_INT32, &shift_value1);
    /* The order in which these two nodes are created should not matter: a
     * graph is a dataflow description, so execution order is derived from
     * data dependencies at verification time, not from creation order. */
    vxConvertDepthNode(graph, intermediate_images[0], images[1], VX_CONVERT_POLICY_SATURATE, vx_value2);
    vxConvertDepthNode(graph, images[0], intermediate_images[0], VX_CONVERT_POLICY_SATURATE, vx_value1);
    vx_status status = vxVerifyGraph(graph);
    if (status == VX_SUCCESS)
    {
        status = vxProcessGraph(graph);
    }
    if (status != VX_SUCCESS)
    {
        fprintf(stderr, "badness\n");
        abort();
    }
    exit(0);
}
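/* The tutorial exercises that follow rely on ERROR_CHECK_OBJECT and
 * ERROR_CHECK_STATUS macros whose definitions are not shown in these
 * listings. A minimal sketch of how such macros are commonly written
 * (the tutorials' own definitions may differ in detail):
 */
#include <VX/vx.h>
#include <stdio.h>
#include <stdlib.h>

#define ERROR_CHECK_STATUS( call )                                        \
    {                                                                     \
        vx_status status_ = (call);                                       \
        if( status_ != VX_SUCCESS )                                       \
        {                                                                 \
            printf( "ERROR: failed with status = (%d) at " __FILE__       \
                    "#%d\n", status_, __LINE__ );                         \
            exit( 1 );                                                    \
        }                                                                 \
    }

#define ERROR_CHECK_OBJECT( ref )                                         \
    {                                                                     \
        vx_status status_ = vxGetStatus( (vx_reference)(ref) );           \
        if( status_ != VX_SUCCESS )                                       \
        {                                                                 \
            printf( "ERROR: object creation failed (%d) at " __FILE__     \
                    "#%d\n", status_, __LINE__ );                         \
            exit( 1 );                                                    \
        }                                                                 \
    }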
////////
// main() has all the OpenVX application code for this exercise.
// Command-line usage:
//   % exercise3 [<video-sequence>|<camera-device-number>]
// When neither a video sequence nor a camera device number is specified,
// it defaults to the video sequence in "PETS09-S1-L1-View001.avi".
int main( int argc, char * argv[] )
{
    // Get the default video sequence when nothing is specified on the command line
    // and instantiate the OpenCV GUI module for reading input RGB images and
    // displaying the image with OpenVX results.
    const char * video_sequence = argv[1];
    CGuiModule gui( video_sequence );

    // Try to grab the first video frame from the sequence using cv::VideoCapture
    // and check if a video frame is available.
    if( !gui.Grab() )
    {
        printf( "ERROR: input has no video\n" );
        return 1;
    }

    ////////
    // Set the application configuration parameters. Note that the input video
    // sequence is an 8-bit RGB image with dimensions given by gui.GetWidth()
    // and gui.GetHeight(). The parameters for the tensors are:
    //   tensor_dims                   - 3 dimensions of tensor [3 x <width> x <height>]
    //   tensor_input_fixed_point_pos  - fixed-point position for input tensor
    //   tensor_output_fixed_point_pos - fixed-point position for output tensor
    vx_uint32 width                         = gui.GetWidth();
    vx_uint32 height                        = gui.GetHeight();
    vx_size   tensor_dims[3]                = { width, height, 3 }; // 3 channels (RGB)
    vx_uint8  tensor_input_fixed_point_pos  = 5; // Q10.5: input[-128..127] will be mapped to -4..3.96875
    vx_uint8  tensor_output_fixed_point_pos = 7; // Q8.7: output[-1..1] will be mapped to -128..128

    ////////
    // Create the OpenVX context, make sure the returned context is valid, and
    // register the log_callback to receive messages from the OpenVX framework.
    vx_context context = vxCreateContext();
    ERROR_CHECK_OBJECT( context );
    vxRegisterLogCallback( context, log_callback, vx_false_e );

    ////////
    // Register user kernels with the context.
    //
    // TODO STEP 05:********
    //   1. Register the user kernel with the context by calling your
    //      implementation of "registerUserKernel()".
    // ERROR_CHECK_STATUS( registerUserKernel( context ) );

    ////////
    // Create OpenVX tensor objects for input and output.
    //
    // TODO STEP 06:********
    //   1. Create tensor objects using tensor_dims, tensor_input_fixed_point_pos,
    //      and tensor_output_fixed_point_pos.
    // vx_tensor input_tensor  = vxCreateTensor( context, 3, tensor_dims, VX_TYPE_INT16, tensor_input_fixed_point_pos );
    // vx_tensor output_tensor = vxCreateTensor( context, /* Fill in parameters */ );
    // ERROR_CHECK_OBJECT( input_tensor );
    // ERROR_CHECK_OBJECT( output_tensor );

    ////////
    // Create, build, and verify the graph with the user kernel node.
    //
    // TODO STEP 07:********
    //   1. Build a graph with just one node created using userTensorCosNode().
    // vx_graph graph = vxCreateGraph( context );
    // ERROR_CHECK_OBJECT( graph );
    // vx_node cos_node = userTensorCosNode( graph, /* Fill in parameters */ );
    // ERROR_CHECK_OBJECT( cos_node );
    // ERROR_CHECK_STATUS( vxReleaseNode( &cos_node ) );
    // ERROR_CHECK_STATUS( vxVerifyGraph( graph ) );

    ////////
    // Process the video sequence frame by frame until the end of sequence or aborted.
    cv::Mat bgrMatForOutputDisplay( height, width, CV_8UC3 );
    for( int frame_index = 0; !gui.AbortRequested(); frame_index++ )
    {
        ////////
        // Copy the input RGB frame from OpenCV into input_tensor with UINT8 to
        // Q10.5 (INT16) conversion. In order to do this, use the vxMapTensorPatch
        // API (see "vx_ext_amd.h").
        //
        // TODO STEP 08:********
        //   1. Use the vxMapTensorPatch API to access the input tensor object for writing.
        //   2. Copy UINT8 data from the OpenCV RGB image to the tensor object.
        //   3. Use the vxUnmapTensorPatch API to return control of the buffer back to the framework.
        vx_uint8 * cv_rgb_image_buffer = gui.GetBuffer();
        vx_size rgb_stride             = gui.GetStride();
        // vx_size zeros[3] = { 0 };
        // vx_size tensor_stride[3];
        // vx_map_id map_id;
        // vx_uint8 * buf;
        // ERROR_CHECK_STATUS( vxMapTensorPatch( input_tensor,
        //                                       3, /* Fill in parameters */
        //                                       &map_id, tensor_stride,
        //                                       (void **)&buf, VX_WRITE_ONLY, VX_MEMORY_TYPE_HOST, 0 ) );
        // for( vx_size c = 0; c < 3; c++ )
        // {
        //     for( vx_size y = 0; y < height; y++ )
        //     {
        //         const vx_uint8 * img = cv_rgb_image_buffer + y * rgb_stride + c;
        //         vx_int16 * inp = (vx_int16 *)(buf + y * tensor_stride[1] + c * tensor_stride[2]);
        //         for( vx_size x = 0; x < width; x++ )
        //         {
        //             // convert 0..255 to Q10.5 [-4..3.96875 range] fixed-point format
        //             inp[x] = (vx_int16)img[x * 3] - 128;
        //         }
        //     }
        // }
        // ERROR_CHECK_STATUS( vxUnmapTensorPatch( input_tensor, map_id ) );

        ////////
        // Now that the input tensor is ready, just run the graph.
        //
        // TODO STEP 09:********
        //   1. Call vxProcessGraph to execute the tensor_cos kernel in the graph.
        // ERROR_CHECK_STATUS( vxProcessGraph( graph ) );

        ////////
        // Display the output tensor object as an RGB image.
        //
        // TODO STEP 10:********
        //   1. Use the vxMapTensorPatch API to access the output tensor object for
        //      reading (note: the usage mode must be VX_READ_ONLY here, since the
        //      data flows from the tensor to the OpenCV image).
        //   2. Copy the tensor object data into the OpenCV RGB image.
        //   3. Use the vxUnmapTensorPatch API to return control of the buffer back to the framework.
        // ERROR_CHECK_STATUS( vxMapTensorPatch( output_tensor,
        //                                       3, zeros, tensor_dims,
        //                                       &map_id, tensor_stride,
        //                                       (void **)&buf, VX_READ_ONLY, VX_MEMORY_TYPE_HOST, 0 ) );
        // vx_uint8 * cv_bgr_image_buffer = bgrMatForOutputDisplay.data;
        // vx_size bgr_stride             = bgrMatForOutputDisplay.step;
        // for( vx_size c = 0; c < 3; c++ )
        // {
        //     for( vx_size y = 0; y < height; y++ )
        //     {
        //         const vx_int16 * out = (const vx_int16 *)(buf + y * tensor_stride[1] + c * tensor_stride[2]);
        //         vx_uint8 * img = cv_bgr_image_buffer + y * bgr_stride + (2 - c); // (2 - c) for RGB to BGR conversion
        //         for( vx_size x = 0; x < width; x++ )
        //         {
        //             // scale and convert Q8.7 [-1..1 range] fixed-point format to 0..255 with saturation
        //             vx_int16 value = out[x] + 128;
        //             value = value > 255 ? 255 : value; // saturation needed
        //             img[x * 3] = (vx_uint8)value;
        //         }
        //     }
        // }
        //#if ENABLE_DISPLAY
        //        cv::imshow( "Cosine", bgrMatForOutputDisplay );
        //#endif
        // ERROR_CHECK_STATUS( vxUnmapTensorPatch( output_tensor, map_id ) );

        ////////
        // Display the results and grab the next input RGB frame for the next iteration.
        char text[128];
        sprintf( text, "Keyboard ESC/Q-Quit SPACE-Pause [FRAME %d] [fixed_point_pos input:%d output:%d]",
                 frame_index, tensor_input_fixed_point_pos, tensor_output_fixed_point_pos );
        gui.DrawText( 0, 16, text );
        gui.Show();
        if( !gui.Grab() )
        {
            // Terminate the processing loop if the end of sequence is detected.
            gui.WaitForKey();
            break;
        }
    }

    ////////********
    // To release an OpenVX object, you need to call the vxRelease<Object> API,
    // which takes a pointer to the object. If the release operation is successful,
    // the OpenVX framework will reset the object to NULL.
    //
    // TODO STEP 11:****
    //   1. Release the graph and tensor objects.
    // ERROR_CHECK_STATUS( vxReleaseGraph( &graph ) );
    // ERROR_CHECK_STATUS( vxReleaseTensor( &input_tensor ) );
    // ERROR_CHECK_STATUS( vxReleaseTensor( &output_tensor ) );

    ERROR_CHECK_STATUS( vxReleaseContext( &context ) );

    return 0;
}
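/* A sketch of what the completed tensor TODO steps above (STEP 06 and STEP 08)
 * might look like, reusing the exercise's variables. It assumes vxCreateTensor
 * (OpenVX 1.2+) and the vxMapTensorPatch()/vxUnmapTensorPatch() signatures
 * from AMD's "vx_ext_amd.h" that the commented code uses; check your
 * implementation's headers before relying on it.
 */
vx_tensor input_tensor  = vxCreateTensor( context, 3, tensor_dims, VX_TYPE_INT16,
                                          tensor_input_fixed_point_pos );
vx_tensor output_tensor = vxCreateTensor( context, 3, tensor_dims, VX_TYPE_INT16,
                                          tensor_output_fixed_point_pos );
ERROR_CHECK_OBJECT( input_tensor );
ERROR_CHECK_OBJECT( output_tensor );

/* Map the whole input tensor for writing: the view starts at offset zero in
 * all 3 dimensions and ends at tensor_dims; the framework returns the strides. */
vx_size   zeros[3] = { 0 };
vx_size   tensor_stride[3];
vx_map_id map_id;
vx_uint8 *buf;
ERROR_CHECK_STATUS( vxMapTensorPatch( input_tensor, 3, zeros, tensor_dims,
                                      &map_id, tensor_stride, (void **)&buf,
                                      VX_WRITE_ONLY, VX_MEMORY_TYPE_HOST, 0 ) );
/* ... per-channel copy loop as in the commented code above ... */
ERROR_CHECK_STATUS( vxUnmapTensorPatch( input_tensor, map_id ) );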
////////
// main() has all the OpenVX application code for this exercise.
// Command-line usage:
//   % solution_exercise2 [<video-sequence>|<camera-device-number>]
// When neither a video sequence nor a camera device number is specified,
// it defaults to the video sequence in "PETS09-S1-L1-View001.avi".
int main( int argc, char * argv[] )
{
    // Get the default video sequence when nothing is specified on the command line
    // and instantiate the OpenCV GUI module for reading input RGB images and
    // displaying the image with OpenVX results.
    const char * video_sequence = argv[1];
    CGuiModule gui( video_sequence );

    // Try to grab the first video frame from the sequence using cv::VideoCapture
    // and check if a video frame is available.
    if( !gui.Grab() )
    {
        printf( "ERROR: input has no video\n" );
        return 1;
    }

    ////////
    // Set the application configuration parameters. Note that the input video
    // sequence is an 8-bit RGB image with dimensions given by gui.GetWidth()
    // and gui.GetHeight(). The parameters for the Harris corners algorithm are:
    //   max_keypoint_count      - maximum number of keypoints to track
    //   harris_strength_thresh  - minimum threshold score to keep a corner
    //                             (computed using the normalized Sobel kernel)
    //   harris_min_distance     - radial L2 distance for non-max suppression
    //   harris_k_sensitivity    - sensitivity threshold k from the Harris-Stephens equation
    //   harris_gradient_size    - window size for gradient computation
    //   harris_block_size       - block window size used to compute the Harris corner score
    //   lk_pyramid_levels       - number of pyramid levels for LK optical flow
    //   lk_termination          - can be VX_TERM_CRITERIA_ITERATIONS or
    //                             VX_TERM_CRITERIA_EPSILON or VX_TERM_CRITERIA_BOTH
    //   lk_epsilon              - error for terminating the algorithm
    //   lk_num_iterations       - number of iterations
    //   lk_use_initial_estimate - turn on/off use of initial estimates
    //   lk_window_dimension     - size of window on which to perform the algorithm
    vx_uint32  width                   = gui.GetWidth();
    vx_uint32  height                  = gui.GetHeight();
    vx_size    max_keypoint_count      = 10000;
    vx_float32 harris_strength_thresh  = 0.0005f;
    vx_float32 harris_min_distance     = 5.0f;
    vx_float32 harris_k_sensitivity    = 0.04f;
    vx_int32   harris_gradient_size    = 3;
    vx_int32   harris_block_size       = 3;
    vx_uint32  lk_pyramid_levels       = 6;
    vx_float32 lk_pyramid_scale        = VX_SCALE_PYRAMID_HALF;
    vx_enum    lk_termination          = VX_TERM_CRITERIA_BOTH;
    vx_float32 lk_epsilon              = 0.01f;
    vx_uint32  lk_num_iterations       = 5;
    vx_bool    lk_use_initial_estimate = vx_false_e;
    vx_uint32  lk_window_dimension     = 6;

    ////////
    // Create the OpenVX context, make sure the returned context is valid, and
    // register the log_callback to receive messages from the OpenVX framework.
    vx_context context = vxCreateContext();
    ERROR_CHECK_OBJECT( context );
    vxRegisterLogCallback( context, log_callback, vx_false_e );

    ////////
    // Create the OpenVX image object for the input RGB image.
    vx_image input_rgb_image = vxCreateImage( context, width, height, VX_DF_IMAGE_RGB );
    ERROR_CHECK_OBJECT( input_rgb_image );

    ////////********
    // The OpenVX optical flow functionality requires pyramids of the current
    // input image and the previous image. It also requires keypoints that
    // correspond to the previous pyramid and will output updated keypoints into
    // another keypoint array. To be able to toggle between the current and
    // the previous buffers, you need to use OpenVX delay objects and vxAgeDelay().
    // Create OpenVX pyramid and array object exemplars and create OpenVX delay
    // objects for both to hold two of each. Note that the exemplar objects are
    // not needed once the delay objects are created.
    //
    // TODO STEP 01:********
    //   1. Use the vxCreatePyramid API to create a pyramid exemplar with the
    //      same dimensions as the input image, VX_DF_IMAGE_U8 as image format,
    //      lk_pyramid_levels as levels, and lk_pyramid_scale as scale.
    //      We gave code for this in comments.
    //   2. Use the vxCreateArray API to create an array exemplar with the
    //      keypoint data type and max_keypoint_count as capacity.
    //      You need to add the missing parameters to the code in comments.
    //   3. Use the vxCreateDelay API to create delay objects for the pyramid and
    //      keypoint array using the exemplars created in the two steps above.
    //      Use 2 delay slots for both of the delay objects.
    //      We gave code for one in comments; do similar for the other.
    //   4. Release the pyramid and keypoint array exemplar objects.
    //      We gave code for one in comments; do similar for the other.
    //   5. Use the ERROR_CHECK_OBJECT/STATUS macros for proper error checking.
    //      We gave a few error checks; do similar for the others.
    // vx_pyramid pyramidExemplar = vxCreatePyramid( context, lk_pyramid_levels,
    //                                               lk_pyramid_scale, width, height, VX_DF_IMAGE_U8 );
    // ERROR_CHECK_OBJECT( pyramidExemplar );
    // vx_delay pyramidDelay = vxCreateDelay( context, ( vx_reference )pyramidExemplar, 2 );
    // ERROR_CHECK_OBJECT( pyramidDelay );
    // ERROR_CHECK_STATUS( vxReleasePyramid( &pyramidExemplar ) );
    // vx_array keypointsExemplar = vxCreateArray( /* Fill in parameters */ );
    // vx_delay keypointsDelay = vxCreateDelay( /* Fill in parameters */ );

    ////////********
    // An object from a delay slot can be accessed using the vxGetReferenceFromDelay API.
    // You need to use index = 0 for the current object and index = -1 for the previous object.
    //
    // TODO STEP 02:********
    //   1. Use the vxGetReferenceFromDelay API to get the current and previous
    //      pyramid objects from the pyramid delay object. Note that you need
    //      to typecast the vx_reference object to vx_pyramid.
    //      We gave code for one in comments; do similar for the other.
    //   2. Similarly, get the current and previous keypoint array objects from
    //      the keypoint delay object.
    //      We gave code for one in comments; do similar for the other.
    //   3. Use ERROR_CHECK_OBJECT for proper error checking.
    //      We gave one error check; do similar for the others.
    // vx_pyramid currentPyramid  = ( vx_pyramid ) vxGetReferenceFromDelay( pyramidDelay, 0 );
    // vx_pyramid previousPyramid = ( vx_pyramid ) vxGetReferenceFromDelay( /* Fill in parameters */ );
    // vx_array currentKeypoints  = ( vx_array ) vxGetReferenceFromDelay( /* Fill in parameters */ );
    // vx_array previousKeypoints = ( vx_array ) vxGetReferenceFromDelay( keypointsDelay, -1 );
    // ERROR_CHECK_OBJECT( currentPyramid );

    ////////********
    // The Harris and optical flow algorithms require their own graph objects.
    // The Harris graph needs to extract a gray scale image out of the input RGB,
    // compute an initial set of keypoints, and compute an initial pyramid for use
    // by the optical flow graph.
    //
    // TODO STEP 03:********
    //   1. Create two graph objects: one for the Harris corner detector and
    //      the other for feature tracking using optical flow, using the
    //      vxCreateGraph API.
    //      We gave code for one graph; do similar for the other.
    //   2. Use ERROR_CHECK_OBJECT to check the objects.
    //      We gave one error check; do similar for the other.
    // vx_graph graphHarris = vxCreateGraph( context );
    // vx_graph graphTrack  = /* Fill in here */;
    // ERROR_CHECK_OBJECT( graphHarris );

    ////////********
    // Harris and pyramid computation expect the input to be an 8-bit image.
    // Given that the input is an RGB image, it is best to extract a gray image
    // from the RGB image, which requires two steps:
    //   - perform RGB to IYUV color conversion
    //   - extract the Y channel from the IYUV image
    // This requires two intermediate OpenVX image objects. Since you don't
    // need to access these objects from the application, they can be virtual
    // objects that can be created using the vxCreateVirtualImage API.
    //
    // TODO STEP 04:********
    //   1. Create an IYUV image and a U8 image (for the Y channel) with the same
    //      dimensions as the input RGB image. Note that the image formats for
    //      the IYUV and U8 images are VX_DF_IMAGE_IYUV and VX_DF_IMAGE_U8.
    //      Note that virtual objects are specific to a graph, so you
    //      need to create two sets, one for each graph.
    //      We gave one fully in comments; you need to fill in the missing
    //      parameters for the others.
    //   2. Use ERROR_CHECK_OBJECT to check the objects.
    //      We gave one error check in comments; do similar for the others.
    // vx_image harris_yuv_image       = vxCreateVirtualImage( graphHarris, width, height, VX_DF_IMAGE_IYUV );
    // vx_image harris_luma_image      = vxCreateVirtualImage( graphHarris, /* Fill in parameters */ );
    // vx_image opticalflow_yuv_image  = vxCreateVirtualImage( graphTrack, /* Fill in parameters */ );
    // vx_image opticalflow_luma_image = vxCreateVirtualImage( /* Fill in parameters */ );
    // ERROR_CHECK_OBJECT( harris_yuv_image );

    ////////********
    // The Harris corner detector and optical flow nodes (see "VX/vx_nodes.h")
    // take the strength_thresh, min_distance, sensitivity, epsilon,
    // num_iterations, and use_initial_estimate parameters as scalar
    // data objects. So, you need to create scalar objects with the corresponding
    // configuration parameters.
    //
    // TODO STEP 05:********
    //   1. Create scalar data objects of VX_TYPE_FLOAT32 for strength_thresh,
    //      min_distance, sensitivity, and epsilon. Set their
    //      initial values to harris_strength_thresh, harris_min_distance,
    //      harris_k_sensitivity, and lk_epsilon.
    //      We gave the full code for one scalar in comments; fill in the
    //      missing arguments for the other ones.
    //   2. Similarly, create scalar objects for num_iterations and
    //      use_initial_estimate with the initial values lk_num_iterations and
    //      lk_use_initial_estimate. Make sure to use the proper data types for
    //      these parameters.
    //      We gave the full code for one scalar in comments; fill in the
    //      missing arguments for the other.
    //   3. Use ERROR_CHECK_OBJECT to check proper creation of the objects.
    //      We gave the error check for one scalar; do similar for the other 5 scalars.
    // vx_scalar strength_thresh      = NULL; // vxCreateScalar( context, VX_TYPE_FLOAT32, &harris_strength_thresh );
    // vx_scalar min_distance         = NULL; // vxCreateScalar( context, /* Fill in parameters */ );
    // vx_scalar sensitivity          = NULL; // vxCreateScalar( /* Fill in parameters */ );
    // vx_scalar epsilon              = NULL; // vxCreateScalar( /* Fill in parameters */ );
    // vx_scalar num_iterations       = NULL; // vxCreateScalar( context, VX_TYPE_UINT32, /* Fill in parameter */ );
    // vx_scalar use_initial_estimate = NULL; // vxCreateScalar( context, VX_TYPE_BOOL, &lk_use_initial_estimate );
    // ERROR_CHECK_OBJECT( strength_thresh );

    ////////********
    // Now all the objects have been created for building the graphs.
    // First, build a graph that performs Harris corner detection and initial
    // pyramid computation. See "VX/vx_nodes.h" for the APIs to add nodes into a graph.
    //
    // TODO STEP 06:********
    //   1. Use the vxColorConvertNode and vxChannelExtractNode APIs to get a gray
    //      scale image for Harris and pyramid computation from the input
    //      RGB image. Add these nodes into the Harris graph.
    //      We gave code in comments with a missing parameter for you to fill in.
    //   2. Use the vxGaussianPyramidNode API to add a pyramid computation node.
    //      You need to use the current pyramid from the pyramid delay object.
    //      We gave code in comments with a missing parameter for you to fill in.
    //   3. Use the vxHarrisCornersNode API to add a Harris corners node.
    //      You need to use the current keypoints from the keypoints delay object.
    //      We gave code in comments with a few missing parameters for you to fill in.
    //   4. Use ERROR_CHECK_OBJECT to check proper creation of the objects.
    //   5. Release the node and virtual objects immediately, since the graph
    //      retains references to them.
    //   6. Call vxVerifyGraph to check for any errors in the graph.
    //      Fill in the missing parameter in the commented code.
    // vx_node nodesHarris[] =
    // {
    //     vxColorConvertNode( graphHarris, input_rgb_image, harris_yuv_image ),
    //     vxChannelExtractNode( graphHarris, /* Fill in parameter */, VX_CHANNEL_Y, harris_luma_image ),
    //     vxGaussianPyramidNode( graphHarris, /* Fill in parameter */, currentPyramid ),
    //     vxHarrisCornersNode( graphHarris, /* Fill in missing parameters */, currentKeypoints, NULL )
    // };
    // for( vx_size i = 0; i < sizeof( nodesHarris ) / sizeof( nodesHarris[0] ); i++ )
    // {
    //     ERROR_CHECK_OBJECT( nodesHarris[i] );
    //     ERROR_CHECK_STATUS( vxReleaseNode( &nodesHarris[i] ) );
    // }
    // ERROR_CHECK_STATUS( vxReleaseImage( &harris_yuv_image ) );
    // ERROR_CHECK_STATUS( vxReleaseImage( &harris_luma_image ) );
    // ERROR_CHECK_STATUS( vxVerifyGraph( /* Fill in parameter */ ) );

    ////////********
    // Now, build a graph that performs pyramid computation and feature
    // tracking using optical flow.
    //
    // TODO STEP 07:********
    //   1. Use the vxColorConvertNode and vxChannelExtractNode APIs to get a gray
    //      scale image for pyramid computation from the input RGB image.
    //      Add these nodes into the tracking graph.
    //      We gave the code in comments for the color convert node; do a similar
    //      one for the channel extract node.
    //   2. Use the vxGaussianPyramidNode API to add a pyramid computation node.
    //      You need to use the current pyramid from the pyramid delay object.
    //      Most of the code is given in the comments; fill in the missing parameter.
    //   3. Use the vxOpticalFlowPyrLKNode API to add an optical flow node. You need
    //      to use the current and previous keypoints from the keypoints delay object.
    //      Fill in the missing parameters in the commented code.
    //   4. Use ERROR_CHECK_OBJECT to check proper creation of the objects.
    //   5. Release the node and virtual objects immediately, since the graph
    //      retains references to them.
    //   6. Call vxVerifyGraph to check for any errors in the graph.
    //      Fill in the missing parameter in the commented code.
    // vx_node nodesTrack[] =
    // {
    //     vxColorConvertNode( graphTrack, input_rgb_image, opticalflow_yuv_image ),
    //     vxChannelExtractNode( graphTrack, /* Fill in parameters */ ),
    //     vxGaussianPyramidNode( graphTrack, /* Fill in parameter */, currentPyramid ),
    //     vxOpticalFlowPyrLKNode( graphTrack, /* Fill in parameters */ )
    // };
    // for( vx_size i = 0; i < sizeof( nodesTrack ) / sizeof( nodesTrack[0] ); i++ )
    // {
    //     ERROR_CHECK_OBJECT( nodesTrack[i] );
    //     ERROR_CHECK_STATUS( vxReleaseNode( &nodesTrack[i] ) );
    // }
    // ERROR_CHECK_STATUS( vxReleaseImage( &opticalflow_yuv_image ) );
    // ERROR_CHECK_STATUS( vxReleaseImage( &opticalflow_luma_image ) );
    // ERROR_CHECK_STATUS( vxVerifyGraph( /* Fill in parameter */ ) );

    ////////
    // Process the video sequence frame by frame until the end of sequence or aborted.
    for( int frame_index = 0; !gui.AbortRequested(); frame_index++ )
    {
        ////////
        // Copy the input RGB frame from OpenCV to OpenVX.
        // In order to do this, you need to use the vxAccessImagePatch and
        // vxCommitImagePatch APIs.
        // See "VX/vx_api.h" for the description of these APIs.
        vx_rectangle_t cv_rgb_image_region;
        cv_rgb_image_region.start_x = 0;
        cv_rgb_image_region.start_y = 0;
        cv_rgb_image_region.end_x   = width;
        cv_rgb_image_region.end_y   = height;
        vx_imagepatch_addressing_t cv_rgb_image_layout;
        cv_rgb_image_layout.stride_x = 3;
        cv_rgb_image_layout.stride_y = gui.GetStride();
        vx_uint8 * cv_rgb_image_buffer = gui.GetBuffer();
        ERROR_CHECK_STATUS( vxAccessImagePatch( input_rgb_image, &cv_rgb_image_region, 0,
                                                &cv_rgb_image_layout,
                                                ( void ** )&cv_rgb_image_buffer, VX_WRITE_ONLY ) );
        ERROR_CHECK_STATUS( vxCommitImagePatch( input_rgb_image, &cv_rgb_image_region, 0,
                                                &cv_rgb_image_layout, cv_rgb_image_buffer ) );

        ////////********
        // Now that the input RGB image is ready, just run a graph.
        // Run Harris at the beginning to initialize the previous keypoints.
        //
        // TODO STEP 08:********
        //   1. Run a graph using the vxProcessGraph API. Select the Harris graph
        //      if frame_index == 0 (i.e., the first frame of the video
        //      sequence); otherwise, select the feature tracking graph.
        //   2. Use ERROR_CHECK_STATUS for error checking.

        ////////********
        // To mark the keypoints in the display, you need to access the output
        // keypoint array and draw each item on the output window using gui.DrawArrow().
        //
        // TODO STEP 09:********
        //   1. Use the vxGetReferenceFromDelay API to get the current and previous
        //      keypoint array objects from the keypoints delay object.
        //      Make sure to typecast the vx_reference object to vx_array.
        //      We gave one for the previous keypoint array in comments;
        //      do a similar one for the current keypoint array.
        //   2. The OpenVX array object has an attribute that keeps the current
        //      number of items in the array. The name of the attribute is
        //      VX_ARRAY_ATTRIBUTE_NUMITEMS and its value is of type vx_size.
        //      Use the vxQueryArray API to get the number of keypoints in the
        //      current keypoint array data object, representing the number of
        //      corners detected in the input RGB image.
        //      IMPORTANT: Read the number of items into "num_corners",
        //      because this variable is displayed by the code segment below.
        //      We gave most of this statement in a comment; just fill in the
        //      missing parameter.
        //   3. The data items in the output keypoint array are of type
        //      vx_keypoint_t (see "VX/vx_types.h"). To access the array
        //      buffer, use vxAccessArrayRange with start index = 0,
        //      end index = number of items in the array, and usage mode =
        //      VX_READ_ONLY. Note that the stride returned by this access
        //      call is not guaranteed to be sizeof(vx_keypoint_t).
        //      Also make sure that num_corners is > 0, because
        //      vxAccessArrayRange expects end index > 0.
        //      We gave the code for the previous keypoint array in a comment;
        //      do a similar one for the current keypoint array.
        //   4. For each item in the keypoint buffer, use vxArrayItem to
        //      access an individual keypoint and draw a marker at (x,y)
        //      using gui.DrawArrow() if the tracking_status field of the keypoint
        //      is non-zero. Also count the number of keypoints with
        //      non-zero tracking_status into the "num_tracking" variable.
        //      We gave most of the code; fill in the missing parameters and uncomment.
        //   5. Hand control of the output keypoint buffer back to the
        //      OpenVX framework by calling the vxCommitArrayRange API.
        //      We gave the code for the previous keypoint array in a comment;
        //      do a similar one for the current keypoint array.
        //   6. Use ERROR_CHECK_STATUS for error checking.
        vx_size num_corners = 0, num_tracking = 0;
        // previousKeypoints = ( vx_array )vxGetReferenceFromDelay( keypointsDelay, -1 );
        // currentKeypoints  = ( vx_array )vxGetReferenceFromDelay( /* Fill in parameters */ );
        // ERROR_CHECK_OBJECT( currentKeypoints );
        // ERROR_CHECK_OBJECT( previousKeypoints );
        // ERROR_CHECK_STATUS( vxQueryArray( previousKeypoints, /* Fill in parameter */, &num_corners, sizeof( num_corners ) ) );
        if( num_corners > 0 )
        {
            vx_size kp_old_stride, kp_new_stride;
            vx_keypoint_t * kp_old_buf = NULL, * kp_new_buf = NULL;
            // ERROR_CHECK_STATUS( vxAccessArrayRange( previousKeypoints, 0, num_corners,
            //                                         &kp_old_stride, ( void ** ) &kp_old_buf, VX_READ_ONLY ) );
            // ERROR_CHECK_STATUS( vxAccessArrayRange( /* Fill in parameters */ ) );
            for( vx_size i = 0; i < num_corners; i++ )
            {
                // vx_keypoint_t * kp_old = &vxArrayItem( vx_keypoint_t, kp_old_buf, i, kp_old_stride );
                // vx_keypoint_t * kp_new = &vxArrayItem( /* Fill in parameters */ );
                // if( kp_new->tracking_status )
                // {
                //     num_tracking++;
                //     gui.DrawArrow( kp_old->x, kp_old->y, kp_new->x, kp_new->y );
                // }
            }
            // ERROR_CHECK_STATUS( vxCommitArrayRange( previousKeypoints, 0, num_corners, kp_old_buf ) );
            // ERROR_CHECK_STATUS( vxCommitArrayRange( /* Fill in parameters */ ) );
        }

        ////////********
        // Flip the current and previous pyramids and keypoints in the delay objects.
        //
        // TODO STEP 10:********
        //   1. Use the vxAgeDelay API to flip the current and previous buffers
        //      in the delay objects. You need to call vxAgeDelay for both delay objects.
        //   2. Use ERROR_CHECK_STATUS for error checking.
        // ERROR_CHECK_STATUS( vxAgeDelay( /* Fill in parameter */ ) );
        // ERROR_CHECK_STATUS( vxAgeDelay( /* Fill in parameter */ ) );

        ////////
        // Display the results and grab the next input RGB frame for the next iteration.
        char text[128];
        sprintf( text, "Keyboard ESC/Q-Quit SPACE-Pause [FRAME %d]", frame_index );
        gui.DrawText( 0, 16, text );
        sprintf( text, "Number of Corners: %d [tracking %d]", ( int )num_corners, ( int )num_tracking );
        gui.DrawText( 0, 36, text );
        gui.Show();
        if( !gui.Grab() )
        {
            // Terminate the processing loop if the end of sequence is detected.
            gui.WaitForKey();
            break;
        }
    }

    ////////********
    // Query graph performance using VX_GRAPH_ATTRIBUTE_PERFORMANCE and print timing
    // in milliseconds. Note that the time units of the vx_perf_t fields are nanoseconds.
    //
    // TODO STEP 11:********
    //   1. Use the vxQueryGraph API with VX_GRAPH_ATTRIBUTE_PERFORMANCE to query
    //      graph performance.
    //      We gave the attribute query for one graph in comments. Do the same
    //      for the second graph.
    //   2. Print the average and min execution times in milliseconds. Use the
    //      printf in comments.
    // vx_perf_t perfHarris = { 0 }, perfTrack = { 0 };
    // ERROR_CHECK_STATUS( vxQueryGraph( graphHarris, VX_GRAPH_ATTRIBUTE_PERFORMANCE, &perfHarris, sizeof( perfHarris ) ) );
    // ERROR_CHECK_STATUS( vxQueryGraph( /* Fill in parameters to get the performance of the other graph */ ) );
    // printf( "GraphName NumFrames Avg(ms) Min(ms)\n"
    //         "Harris    %9d %7.3f %7.3f\n"
    //         "Track     %9d %7.3f %7.3f\n",
    //         ( int )perfHarris.num, ( float )perfHarris.avg * 1e-6f, ( float )perfHarris.min * 1e-6f,
    //         ( int )perfTrack.num,  ( float )perfTrack.avg  * 1e-6f, ( float )perfTrack.min  * 1e-6f );

    ////////********
    // Release all the OpenVX objects created in this exercise, making the context
    // the last one to release. To release an OpenVX object, you need to call the
    // vxRelease<Object> API, which takes a pointer to the object.
    // If the release operation is successful, the OpenVX framework will reset
    // the object to NULL.
    //
    // TODO STEP 12:********
    //   1. Release all other objects using the vxRelease<Object> APIs.
    //      You have to release 2 graph objects, 1 image object, 2 delay objects,
    //      6 scalar objects, and 1 context object.
    //   2. Use ERROR_CHECK_STATUS for error checking.
    // ERROR_CHECK_STATUS( vxReleaseContext( &context ) );

    return 0;
}
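/* A sketch of the delay-object pattern that TODO STEPs 01, 02, and 10 above
 * build toward, reusing the exercise's variables (the official solution file
 * may differ in details):
 */
vx_pyramid pyramidExemplar = vxCreatePyramid( context, lk_pyramid_levels,
                                              lk_pyramid_scale, width, height,
                                              VX_DF_IMAGE_U8 );
vx_array keypointsExemplar = vxCreateArray( context, VX_TYPE_KEYPOINT,
                                            max_keypoint_count );
ERROR_CHECK_OBJECT( pyramidExemplar );
ERROR_CHECK_OBJECT( keypointsExemplar );

/* Two slots each: index 0 is "current", index -1 is "previous". */
vx_delay pyramidDelay   = vxCreateDelay( context, ( vx_reference )pyramidExemplar, 2 );
vx_delay keypointsDelay = vxCreateDelay( context, ( vx_reference )keypointsExemplar, 2 );
ERROR_CHECK_OBJECT( pyramidDelay );
ERROR_CHECK_OBJECT( keypointsDelay );
/* The exemplars are only templates; release them once the delays exist. */
ERROR_CHECK_STATUS( vxReleasePyramid( &pyramidExemplar ) );
ERROR_CHECK_STATUS( vxReleaseArray( &keypointsExemplar ) );

vx_pyramid currentPyramid    = ( vx_pyramid )vxGetReferenceFromDelay( pyramidDelay, 0 );
vx_pyramid previousPyramid   = ( vx_pyramid )vxGetReferenceFromDelay( pyramidDelay, -1 );
vx_array   currentKeypoints  = ( vx_array )vxGetReferenceFromDelay( keypointsDelay, 0 );
vx_array   previousKeypoints = ( vx_array )vxGetReferenceFromDelay( keypointsDelay, -1 );

/* After each processed frame, age both delays so that "current" becomes
 * "previous" without copying any data: */
ERROR_CHECK_STATUS( vxAgeDelay( pyramidDelay ) );
ERROR_CHECK_STATUS( vxAgeDelay( keypointsDelay ) );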
/*! \brief The graph factory example.
 * \ingroup group_example
 */
int main(int argc, char *argv[])
{
    vx_status status = VX_SUCCESS;
    vx_context context = vxCreateContext();
    if (context)
    {
        vx_image images[] = {
            vxCreateImage(context, 640, 480, VX_DF_IMAGE_U8),
            vxCreateImage(context, 640, 480, VX_DF_IMAGE_S16),
        };
        vx_graph graph = vxGraphFactory(context, VX_GRAPH_FACTORY_EDGE);
        if (graph)
        {
            vx_uint32 p, num = 0;
            status |= vxQueryGraph(graph, VX_GRAPH_ATTRIBUTE_NUMPARAMETERS, &num, sizeof(num));
            if (status == VX_SUCCESS)
            {
                printf("There are %u parameters to this graph!\n", num);
                for (p = 0; p < num; p++)
                {
                    vx_parameter param = vxGetGraphParameterByIndex(graph, p);
                    if (param)
                    {
                        vx_enum dir = 0;
                        vx_enum type = 0;
                        status |= vxQueryParameter(param, VX_PARAMETER_ATTRIBUTE_DIRECTION, &dir, sizeof(dir));
                        status |= vxQueryParameter(param, VX_PARAMETER_ATTRIBUTE_TYPE, &type, sizeof(type));
                        printf("graph.parameter[%u] dir:%d type:%08x\n", p, dir, type);
                        vxReleaseParameter(&param);
                    }
                    else
                    {
                        printf("Invalid parameter retrieved from graph!\n");
                    }
                }
                status |= vxSetGraphParameterByIndex(graph, 0, (vx_reference)images[0]);
                status |= vxSetGraphParameterByIndex(graph, 1, (vx_reference)images[1]);
            }
            status |= vxVerifyGraph(graph);
            if (status == VX_SUCCESS)
            {
                status = vxProcessGraph(graph);
                if (status == VX_SUCCESS)
                {
                    printf("Ran Graph!\n");
                }
            }
            vxReleaseGraph(&graph);
        }
        else
        {
            printf("Failed to create graph!\n");
        }
        vxReleaseContext(&context);
    }
    else
    {
        printf("failed to create context!\n");
    }
    return status;
}
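/* vxGraphFactory() and VX_GRAPH_FACTORY_EDGE above are helpers local to this
 * sample, not part of the OpenVX API. A minimal sketch of how such a factory
 * can build a fixed pipeline and expose node parameters as graph parameters
 * (the sample's real edge factory is more elaborate; the function name here
 * is illustrative):
 */
#include <VX/vx.h>

vx_graph makeEdgeGraphSketch(vx_context context)
{
    vx_graph graph = vxCreateGraph(context);
    if (vxGetStatus((vx_reference)graph) == VX_SUCCESS)
    {
        /* virtual images: internal edges of the graph, invisible to the caller */
        vx_image input  = vxCreateVirtualImage(graph, 0, 0, VX_DF_IMAGE_U8);
        vx_image smooth = vxCreateVirtualImage(graph, 0, 0, VX_DF_IMAGE_U8);
        vx_image gx     = vxCreateVirtualImage(graph, 0, 0, VX_DF_IMAGE_S16);
        vx_image gy     = vxCreateVirtualImage(graph, 0, 0, VX_DF_IMAGE_S16);
        vx_image output = vxCreateVirtualImage(graph, 0, 0, VX_DF_IMAGE_S16);
        vx_node nodes[] = {
            vxGaussian3x3Node(graph, input, smooth),
            vxSobel3x3Node(graph, smooth, gx, gy),
            vxMagnitudeNode(graph, gx, gy, output),
        };
        /* Expose the head input and the tail output; the caller later binds
         * real images to them with vxSetGraphParameterByIndex(graph, 0/1, ...). */
        vxAddParameterToGraph(graph, vxGetParameterByIndex(nodes[0], 0));
        vxAddParameterToGraph(graph, vxGetParameterByIndex(nodes[2], 2));
        /* node/image/parameter releases elided for brevity */
    }
    return graph;
}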
vxContext()
{
    ctx = vxCreateContext();
    vxErr::check(ctx);
}
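// The constructor above presumably belongs to a small RAII wrapper. A sketch
// of the rest of such a class (vxErr is assumed to be this project's
// error-to-exception helper):
class vxContext
{
    vx_context ctx;
public:
    vxContext() { ctx = vxCreateContext(); vxErr::check(ctx); }
    ~vxContext() { vxReleaseContext(&ctx); }          // the context cannot leak
    vxContext(const vxContext &) = delete;            // OpenVX references are not
    vxContext &operator=(const vxContext &) = delete; // trivially shareable
    operator vx_context() const { return ctx; }       // pass directly to vxCreate* calls
};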
int main(int argc, char *argv[])
{
    vx_status status = VX_FAILURE;
    vx_context context = vxCreateContext();
    if (argc < 2)
    {
        usage(argv[0]);
        goto relCtx;
    }
    vx_char *srcfilename = argv[1];
    printf("src img: %s\n", srcfilename);
    FILE *fp = fopen(srcfilename, "r");
    if (!fp)
    {
        goto relCtx;
    }
    char pgmstr[1024];
    unsigned int n;
    n = fread(pgmstr, 1, sizeof(pgmstr), fp);
    if (n != sizeof(pgmstr))
    {
        goto relClose;
    }
    /* strtok needs a NUL-terminated delimiter string, not a pointer to a
     * single char, so use "\n" rather than the address of a lone '\n'. */
    const char *delim = "\n";
    const char *token = NULL;
    unsigned int width, height;
    // PGM P5 magic string
    token = strtok(pgmstr, delim);
    // PGM author
    token = strtok(NULL, delim);
    // PGM image size
    token = strtok(NULL, delim);
    sscanf(token, "%u %u", &width, &height);
    printf("width:%u height:%u\n", width, height);
    status = vxGetStatus((vx_reference)context);
    if (status != VX_SUCCESS)
    {
        fprintf(stderr, "error: vxCreateContext\n");
        goto relClose;
    }
    vx_rectangle_t rect = {1, 1, width + 1, height + 1};
    vx_uint32 i = 0;
    /* Create the images one at a time: referencing images[0] inside its own
     * initializer list is undefined behavior, so the ROI image is created
     * only after its parent image exists. */
    vx_image images[6];
    images[0] = vxCreateImage(context, width + 2, height + 2, VX_DF_IMAGE_U8); // 0:input
    images[1] = vxCreateImageFromROI(images[0], &rect);                        // 1:ROI input
    images[2] = vxCreateImage(context, width, height, VX_DF_IMAGE_U8);         // 2:box
    images[3] = vxCreateImage(context, width, height, VX_DF_IMAGE_U8);         // 3:gaussian
    images[4] = vxCreateImage(context, width, height, VX_DF_IMAGE_U8);         // 4:alpha
    images[5] = vxCreateImage(context, width, height, VX_DF_IMAGE_S16);        // 5:add
    vx_float32 a = 0.5f;
    vx_scalar alpha = vxCreateScalar(context, VX_TYPE_FLOAT32, &a);
    status |= vxLoadKernels(context, "openvx-tiling");
    status |= vxLoadKernels(context, "openvx-debug");
    if (status != VX_SUCCESS)
    {
        fprintf(stderr, "error: vxLoadKernels %d\n", status);
        goto relImg;
    }
    vx_graph graph = vxCreateGraph(context);
    status = vxGetStatus((vx_reference)graph);
    if (status != VX_SUCCESS)
    {
        fprintf(stderr, "error: vxCreateGraph\n");
        goto relKern;
    }
    ax_node_t axnodes[] = {
        { vxFReadImageNode(graph, srcfilename, images[1]),         "Read" },
        { vxTilingBoxNode(graph, images[1], images[2], 5, 5),      "Box" },
        { vxFWriteImageNode(graph, images[2], "ot_box.pgm"),       "Write" },
        { vxTilingGaussianNode(graph, images[1], images[3]),       "Gaussian" },
        { vxFWriteImageNode(graph, images[3], "ot_gauss.pgm"),     "Write" },
        { vxTilingAlphaNode(graph, images[1], alpha, images[4]),   "Alpha" },
        { vxFWriteImageNode(graph, images[4], "ot_alpha.pgm"),     "Write" },
        { vxTilingAddNode(graph, images[1], images[4], images[5]), "Add" },
        { vxFWriteImageNode(graph, images[5], "ot_add.pgm"),       "Write" },
    };
    for (i = 0; i < dimof(axnodes); i++)
    {
        if (axnodes[i].node == 0)
        {
            fprintf(stderr, "error: Failed to create node[%u]\n", i);
            status = VX_ERROR_INVALID_NODE;
            goto relNod;
        }
    }
    status = vxVerifyGraph(graph);
    if (status != VX_SUCCESS)
    {
        fprintf(stderr, "error: vxVerifyGraph %d\n", status);
        goto relNod;
    }
    status = vxProcessGraph(graph);
    if (status != VX_SUCCESS)
    {
        fprintf(stderr, "error: vxProcessGraph %d\n", status);
        goto relNod;
    }
    // perf timings
    vx_perf_t perf_node;
    vx_perf_t perf_graph;
    vxQueryGraph(graph, VX_GRAPH_ATTRIBUTE_PERFORMANCE, &perf_graph, sizeof(perf_graph));
    axPrintPerf("Graph", &perf_graph);
    for (i = 0; i < dimof(axnodes); ++i)
    {
        vxQueryNode(axnodes[i].node, VX_NODE_ATTRIBUTE_PERFORMANCE, &perf_node, sizeof(perf_node));
        axPrintPerf(axnodes[i].name, &perf_node);
    }
relNod:
    for (i = 0; i < dimof(axnodes); i++)
    {
        vxReleaseNode(&axnodes[i].node);
    }
    vxReleaseGraph(&graph);
relKern:
relImg:
    for (i = 0; i < dimof(images); i++)
    {
        vxReleaseImage(&images[i]);
    }
relClose:
    fclose(fp);
relCtx:
    vxReleaseContext(&context);
    printf("%s::main() returns = %d\n", argv[0], status);
    return (int)status;
}
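/* ax_node_t and axPrintPerf() are local to this example and not shown above.
 * A sketch of their likely shape, given that vx_perf_t reports times in
 * nanoseconds (the exact formatting is an assumption):
 */
typedef struct ax_node_t {
    vx_node node;
    const char *name;
} ax_node_t;

static void axPrintPerf(const char *name, const vx_perf_t *perf)
{
    /* avg and min are vx_uint64 nanoseconds; scale to milliseconds */
    printf("%-10s runs:%lu avg:%.3f ms min:%.3f ms\n",
           name,
           (unsigned long)perf->num,
           (float)perf->avg * 1e-6f,
           (float)perf->min * 1e-6f);
}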
int main(int argc, char *argv[])
{
    vx_status status = VX_SUCCESS;
    vx_context context = vxCreateContext();
    if (vxGetStatus((vx_reference)context) == VX_SUCCESS)
    {
        vx_char implementation[VX_MAX_IMPLEMENTATION_NAME];
        vx_char *extensions = NULL;
        vx_int32 m;
        vx_uint32 modules = 0;
        vx_uint32 k, kernels = 0;
        vx_uint32 p, parameters = 0;
        vx_uint32 a = 0;
        vx_uint16 vendor, version;
        vx_size size = 0;
        vx_kernel_info_t *table = NULL;
        // take each arg as a module name to load
        for (m = 1; m < argc; m++)
        {
            if (vxLoadKernels(context, argv[m]) != VX_SUCCESS)
                printf("Failed to load module %s\n", argv[m]);
            else
                printf("Loaded module %s\n", argv[m]);
        }
        vxPrintAllLog(context);
        vxRegisterHelperAsLogReader(context);
        vxQueryContext(context, VX_CONTEXT_VENDOR_ID, &vendor, sizeof(vendor));
        vxQueryContext(context, VX_CONTEXT_VERSION, &version, sizeof(version));
        vxQueryContext(context, VX_CONTEXT_IMPLEMENTATION, implementation, sizeof(implementation));
        vxQueryContext(context, VX_CONTEXT_MODULES, &modules, sizeof(modules));
        vxQueryContext(context, VX_CONTEXT_EXTENSIONS_SIZE, &size, sizeof(size));
        printf("implementation=%s (%02x:%02x) has %u modules\n", implementation, vendor, version, modules);
        extensions = malloc(size);
        if (extensions)
        {
            vx_char *line = extensions, *token = NULL;
            vxQueryContext(context, VX_CONTEXT_EXTENSIONS, extensions, size);
            do
            {
                token = strtok(line, " ");
                if (token)
                    printf("extension: %s\n", token);
                line = NULL;
            } while (token);
            free(extensions);
        }
        status = vxQueryContext(context, VX_CONTEXT_UNIQUE_KERNELS, &kernels, sizeof(kernels));
        if (status != VX_SUCCESS)
            goto exit;
        printf("There are %u kernels\n", kernels);
        size = kernels * sizeof(vx_kernel_info_t);
        table = malloc(size);
        status = vxQueryContext(context, VX_CONTEXT_UNIQUE_KERNEL_TABLE, table, size);
        for (k = 0; k < kernels && table != NULL && status == VX_SUCCESS; k++)
        {
            vx_kernel kernel = vxGetKernelByEnum(context, table[k].enumeration);
            if (kernel && vxGetStatus((vx_reference)kernel) == VX_SUCCESS)
            {
                status = vxQueryKernel(kernel, VX_KERNEL_PARAMETERS, &parameters, sizeof(parameters));
                printf("\t\tkernel[%u]=%s has %u parameters (%d)\n",
                       table[k].enumeration, table[k].name, parameters, status);
                for (p = 0; p < parameters; p++)
                {
                    vx_parameter parameter = vxGetKernelParameterByIndex(kernel, p);
                    vx_enum type = VX_TYPE_INVALID, dir = VX_INPUT;
                    vx_uint32 tIdx, dIdx;
                    status = VX_SUCCESS;
                    status |= vxQueryParameter(parameter, VX_PARAMETER_TYPE, &type, sizeof(type));
                    status |= vxQueryParameter(parameter, VX_PARAMETER_DIRECTION, &dir, sizeof(dir));
                    for (tIdx = 0; tIdx < dimof(parameter_names); tIdx++)
                        if (parameter_names[tIdx].tenum == type)
                            break;
                    for (dIdx = 0; dIdx < dimof(direction_names); dIdx++)
                        if (direction_names[dIdx].tenum == dir)
                            break;
                    if (status == VX_SUCCESS)
                        printf("\t\t\tparameter[%u] type:%s dir:%s\n",
                               p, parameter_names[tIdx].name, direction_names[dIdx].name);
                    vxReleaseParameter(&parameter);
                }
                for (a = 0; a < dimof(attribute_names); a++)
                {
                    switch (attribute_names[a].type)
                    {
                        case VX_TYPE_SIZE:
                        {
                            vx_size value = 0;
                            if (VX_SUCCESS == vxQueryKernel(kernel, attribute_names[a].tenum, &value, sizeof(value)))
                                printf("\t\t\tattribute[%u] %s = "VX_FMT_SIZE"\n",
                                       attribute_names[a].tenum & VX_ATTRIBUTE_ID_MASK,
                                       attribute_names[a].name, value);
                            break;
                        }
                        case VX_TYPE_UINT32:
                        {
                            vx_uint32 value = 0;
                            if (VX_SUCCESS == vxQueryKernel(kernel, attribute_names[a].tenum, &value, sizeof(value)))
                                printf("\t\t\tattribute[%u] %s = %u\n",
                                       attribute_names[a].tenum & VX_ATTRIBUTE_ID_MASK,
                                       attribute_names[a].name, value);
                            break;
                        }
                        default:
                            break;
                    }
                }
                vxReleaseKernel(&kernel);
            }
else { printf("ERROR: kernel %s is invalid (%d) !\n", table[k].name, status); } } for (m = 1; m < argc; m++) { if (vxUnloadKernels(context, argv[m]) != VX_SUCCESS) printf("Failed to unload module %s\n", argv[m]); else printf("Unloaded module %s\n", argv[m]); } exit: if (table) free(table); vxReleaseContext(&context); } return 0; }
/*!
 * \brief An example of a super resolution algorithm.
 * \ingroup group_example
 */
int example_super_resolution(int argc, char *argv[])
{
    vx_status status = VX_SUCCESS;
    vx_uint32 image_index = 0, max_num_images = 4;
    vx_uint32 width = 640;
    vx_uint32 i = 0;
    vx_uint32 winSize = 32;
    vx_uint32 height = 480;
    vx_int32 sens_thresh = 20;
    vx_float32 alpha = 0.2f;
    vx_float32 tau = 0.5f;
    vx_enum criteria = VX_TERM_CRITERIA_BOTH;
    // LK params
    vx_float32 epsilon = 0.01f;
    vx_int32 num_iterations = 10;
    vx_bool use_initial_estimate = vx_true_e;
    vx_int32 min_distance = 5;
    // Harris params
    vx_float32 sensitivity = 0.04f;
    vx_int32 gradient_size = 3;
    vx_int32 block_size = 3;
    vx_context context = vxCreateContext();
    vx_scalar alpha_s = vxCreateScalar(context, VX_TYPE_FLOAT32, &alpha);
    vx_scalar tau_s = vxCreateScalar(context, VX_TYPE_FLOAT32, &tau);
    vx_matrix matrix_forward = vxCreateMatrix(context, VX_TYPE_FLOAT32, 3, 3);
    vx_matrix matrix_backward = vxCreateMatrix(context, VX_TYPE_FLOAT32, 3, 3);
    vx_array old_features = vxCreateArray(context, VX_TYPE_KEYPOINT, 1000);
    vx_array new_features = vxCreateArray(context, VX_TYPE_KEYPOINT, 1000);
    vx_scalar epsilon_s = vxCreateScalar(context, VX_TYPE_FLOAT32, &epsilon);
    vx_scalar num_iterations_s = vxCreateScalar(context, VX_TYPE_INT32, &num_iterations);
    vx_scalar use_initial_estimate_s = vxCreateScalar(context, VX_TYPE_BOOL, &use_initial_estimate);
    vx_scalar min_distance_s = vxCreateScalar(context, VX_TYPE_INT32, &min_distance);
    vx_scalar sensitivity_s = vxCreateScalar(context, VX_TYPE_FLOAT32, &sensitivity);
    vx_scalar sens_thresh_s = vxCreateScalar(context, VX_TYPE_INT32, &sens_thresh);
    vx_scalar num_corners = vxCreateScalar(context, VX_TYPE_SIZE, NULL);
    if (vxGetStatus((vx_reference)context) == VX_SUCCESS)
    {
        vx_image images[] = {
            vxCreateImage(context, width, height, VX_DF_IMAGE_UYVY),        // index 0: input
            vxCreateImage(context, width, height, VX_DF_IMAGE_U8),          // index 1: get Y channel
            vxCreateImage(context, width * 2, height * 2, VX_DF_IMAGE_U8),  // index 2: scale up to high res
            vxCreateImage(context, width * 2, height * 2, VX_DF_IMAGE_U8),  // index 3: backward warp: transform to the original image
            vxCreateImage(context, width * 2, height * 2, VX_DF_IMAGE_U8),  // index 4: Gaussian blur
            vxCreateImage(context, width, height, VX_DF_IMAGE_U8),          // index 5: scale down
            vxCreateImage(context, width, height, VX_DF_IMAGE_S16),         // index 6: subtract the transformed image from the original moved image
            vxCreateImage(context, width * 2, height * 2, VX_DF_IMAGE_S16), // index 7: scale up the delta image
            vxCreateImage(context, width * 2, height * 2, VX_DF_IMAGE_S16), // index 8: Gaussian blur the delta image
            vxCreateImage(context, width * 2, height * 2, VX_DF_IMAGE_S16), // index 9: forward warp: transform the deltas back to the high res image
            vxCreateImage(context, width * 2, height * 2, VX_DF_IMAGE_U8),  // index 10: accumulated sum
            vxCreateImage(context, width, height, VX_DF_IMAGE_U8),          // index 11: get U channel
            vxCreateImage(context, width * 2, height * 2, VX_DF_IMAGE_U8),  // index 12: scale up to high res
            vxCreateImage(context, width, height, VX_DF_IMAGE_U8),          // index 13: get V channel
            vxCreateImage(context, width * 2, height * 2, VX_DF_IMAGE_U8),  // index 14: scale up to high res
            vxCreateImage(context, width, height, VX_DF_IMAGE_UYVY),        // index 15: output image
            vxCreateImage(context, width * 2, height * 2, VX_DF_IMAGE_U8),  // index 16: original Y image, scaled
            vxCreateImage(context, width * 2, height * 2, VX_DF_IMAGE_U8),  // index 17: difference image for the last calculation
        };
        /* Pyramid scale must be a valid vx_float32 scale such as
         * VX_SCALE_PYRAMID_HALF; a bare integer "2" is not a legal value. */
        vx_pyramid pyramid_new = vxCreatePyramid(context, 4, VX_SCALE_PYRAMID_HALF, width, height, VX_DF_IMAGE_U8);
        vx_pyramid pyramid_old = vxCreatePyramid(context, 4, VX_SCALE_PYRAMID_HALF, width, height, VX_DF_IMAGE_U8);
        vx_graph graphs[] = {
            vxCreateGraph(context),
            vxCreateGraph(context),
            vxCreateGraph(context),
            vxCreateGraph(context),
        };
        vxLoadKernels(context, "openvx-debug");
        if (vxGetStatus((vx_reference)graphs[0]) == VX_SUCCESS)
        {
            // One iteration of the super resolution calculation
            vxChannelExtractNode(graphs[0], images[0], VX_CHANNEL_Y, images[1]);
            vxScaleImageNode(graphs[0], images[1], images[2], VX_INTERPOLATION_TYPE_BILINEAR);
            vxWarpPerspectiveNode(graphs[0], images[2], matrix_forward, 0, images[3]);
            vxGaussian3x3Node(graphs[0], images[3], images[4]);
            vxScaleImageNode(graphs[0], images[4], images[5], VX_INTERPOLATION_TYPE_BILINEAR);
            vxSubtractNode(graphs[0], images[5], images[16], VX_CONVERT_POLICY_SATURATE, images[6]);
            vxScaleImageNode(graphs[0], images[6], images[7], VX_INTERPOLATION_TYPE_BILINEAR);
            vxGaussian3x3Node(graphs[0], images[7], images[8]);
            vxWarpPerspectiveNode(graphs[0], images[8], matrix_backward, 0, images[9]);
            vxAccumulateWeightedImageNode(graphs[0], images[9], alpha_s, images[10]);
        }
        if (vxGetStatus((vx_reference)graphs[1]) == VX_SUCCESS)
        {
            // Track features from the previous frame into the current frame
            vxChannelExtractNode(graphs[1], images[0], VX_CHANNEL_Y, images[1]);
            vxGaussianPyramidNode(graphs[1], images[1], pyramid_new);
            vxOpticalFlowPyrLKNode(graphs[1], pyramid_old, pyramid_new,
                                   old_features, old_features, new_features,
                                   criteria, epsilon_s, num_iterations_s,
                                   use_initial_estimate_s, winSize);
        }
        if (vxGetStatus((vx_reference)graphs[2]) == VX_SUCCESS)
        {
            // Initialization: seed the features and the "old" pyramid from the first frame
            vxChannelExtractNode(graphs[2], images[0], VX_CHANNEL_Y, images[1]);
            vxHarrisCornersNode(graphs[2], images[1], sens_thresh_s, min_distance_s,
                                sensitivity_s, gradient_size, block_size,
                                old_features, num_corners);
            vxGaussianPyramidNode(graphs[2], images[1], pyramid_old);
            vxScaleImageNode(graphs[2], images[1], images[16], VX_INTERPOLATION_TYPE_BILINEAR);
        }
        if (vxGetStatus((vx_reference)graphs[3]) == VX_SUCCESS)
        {
            // Final pass: refine the accumulated image and recombine the channels
            vxSubtractNode(graphs[3], images[10], images[16], VX_CONVERT_POLICY_SATURATE, images[17]);
            vxAccumulateWeightedImageNode(graphs[3], images[17], tau_s, images[16]);
            vxChannelExtractNode(graphs[3], images[16], VX_CHANNEL_U, images[11]);
            vxScaleImageNode(graphs[3], images[11], images[12], VX_INTERPOLATION_TYPE_BILINEAR); // upscale the U channel
            vxChannelExtractNode(graphs[3], images[0], VX_CHANNEL_V, images[13]);
            vxScaleImageNode(graphs[3], images[13], images[14], VX_INTERPOLATION_TYPE_BILINEAR); // upscale the V channel
            vxChannelCombineNode(graphs[3], images[10], images[12], images[14], 0, images[15]);  // recombine the channels
        }
        status = VX_SUCCESS;
        status |= vxVerifyGraph(graphs[0]);
        status |= vxVerifyGraph(graphs[1]);
        status |= vxVerifyGraph(graphs[2]);
        status |= vxVerifyGraph(graphs[3]);
        if (status == VX_SUCCESS)
        {
            /* read the initial image in */
            status |= vxuFReadImage(context, "c:\\work\\super_res\\superres_1_UYVY.yuv", images[0]);
            /* compute the "old" pyramid */
            status |= vxProcessGraph(graphs[2]);
            /* for each input image, read it in and run graphs[1] and [0] */
            for (image_index = 1; image_index < max_num_images; image_index++)
            {
                char filename[256];
                sprintf(filename, "c:\\work\\super_res\\superres_%d_UYVY.yuv", image_index + 1);
                status |= vxuFReadImage(context, filename, images[0]);
                status |= vxProcessGraph(graphs[1]);
                userCalculatePerspectiveTransformFromLK(matrix_forward, matrix_backward,
                                                        old_features, new_features);
                status |= vxProcessGraph(graphs[0]);
            }
            /* run the final graph */
            status |= vxProcessGraph(graphs[3]);
            /* save the output */
            status |= vxuFWriteImage(context, images[15], "superres_UYVY.yuv");
        }
        vxReleaseGraph(&graphs[0]);
        vxReleaseGraph(&graphs[1]);
        vxReleaseGraph(&graphs[2]);
        vxReleaseGraph(&graphs[3]);
        for (i = 0; i < dimof(images); i++)
        {
            vxReleaseImage(&images[i]);
        }
        vxReleasePyramid(&pyramid_new);
        vxReleasePyramid(&pyramid_old);
    }
    vxReleaseMatrix(&matrix_forward);
    vxReleaseMatrix(&matrix_backward);
    vxReleaseScalar(&alpha_s);
    vxReleaseScalar(&tau_s);
    /* Release the context last */
    vxReleaseContext(&context);
    return status;
}
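/* userCalculatePerspectiveTransformFromLK() is user code, not an OpenVX API;
 * the real helper fits a perspective transform to the LK point matches. A
 * heavily simplified stand-in that fits the same call site: it averages the
 * displacement of tracked keypoints and writes translation-only matrices.
 * It assumes both arrays hold the same number of items (as the LK node
 * produces here) and follows the warp convention of translation terms in the
 * third row of a row-major 3x3 matrix; verify both against your target
 * implementation. A production version would fit a full homography, e.g.
 * with least squares plus RANSAC.
 */
static void userCalculatePerspectiveTransformFromLK(vx_matrix forward,
                                                    vx_matrix backward,
                                                    vx_array old_f, vx_array new_f)
{
    vx_size count = 0;
    vxQueryArray(new_f, VX_ARRAY_NUMITEMS, &count, sizeof(count));
    vx_float32 dx = 0.0f, dy = 0.0f;
    vx_size tracked = 0;
    if (count > 0)
    {
        vx_size stride_o = 0, stride_n = 0;
        vx_map_id map_o, map_n;
        vx_keypoint_t *kp_o = NULL, *kp_n = NULL;
        vxMapArrayRange(old_f, 0, count, &map_o, &stride_o, (void **)&kp_o,
                        VX_READ_ONLY, VX_MEMORY_TYPE_HOST, 0);
        vxMapArrayRange(new_f, 0, count, &map_n, &stride_n, (void **)&kp_n,
                        VX_READ_ONLY, VX_MEMORY_TYPE_HOST, 0);
        for (vx_size i = 0; i < count; i++)
        {
            const vx_keypoint_t *o = &vxArrayItem(vx_keypoint_t, kp_o, i, stride_o);
            const vx_keypoint_t *n = &vxArrayItem(vx_keypoint_t, kp_n, i, stride_n);
            if (n->tracking_status) /* only keypoints the LK node kept */
            {
                dx += (vx_float32)(n->x - o->x);
                dy += (vx_float32)(n->y - o->y);
                tracked++;
            }
        }
        vxUnmapArrayRange(old_f, map_o);
        vxUnmapArrayRange(new_f, map_n);
    }
    if (tracked)
    {
        dx /= (vx_float32)tracked;
        dy /= (vx_float32)tracked;
    }
    vx_float32 fwd[3][3] = { {1, 0, 0}, {0, 1, 0}, { dx,  dy, 1} };
    vx_float32 bwd[3][3] = { {1, 0, 0}, {0, 1, 0}, {-dx, -dy, 1} };
    vxCopyMatrix(forward,  fwd, VX_WRITE_ONLY, VX_MEMORY_TYPE_HOST);
    vxCopyMatrix(backward, bwd, VX_WRITE_ONLY, VX_MEMORY_TYPE_HOST);
}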
//**************************************************************************
// EXPORTED FUNCTIONS
//**************************************************************************
static void Initialize(JNIEnv *env, jobject obj)
{
    vx_context context = vxCreateContext();
    SetHandle(env, obj, ContextClass, handleName, (jlong)context);
}
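/* SetHandle() is this binding's own helper, not a JNI API. A sketch of the
 * usual pattern: the native pointer is parked in a Java "long" field of the
 * object (the getter name and field conventions here are assumptions):
 */
static void SetHandle(JNIEnv *env, jobject obj, jclass cls,
                      const char *fieldName, jlong value)
{
    jfieldID fid = env->GetFieldID(cls, fieldName, "J"); /* "J" = Java long */
    env->SetLongField(obj, fid, value);
}

/* The matching getter lets later native calls recover the vx_context: */
static vx_context GetContextHandle(JNIEnv *env, jobject obj, jclass cls,
                                   const char *fieldName)
{
    jfieldID fid = env->GetFieldID(cls, fieldName, "J");
    return (vx_context)env->GetLongField(obj, fid);
}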
int main(int argc, char **argv)
{
    int i;
    vx_status status;
    vx_set_debug_zone(VX_ZONE_ERROR);
    //vx_set_debug_zone(VX_ZONE_WARNING);
    //vx_set_debug_zone(VX_ZONE_INFO);
    vx_context context = vxCreateContext();
    CHECK_NOT_NULL(context, "vxCreateContext");
    printf("Successfully created vx_context!!\n\n");
    vxInitLog(&helper_log);
    vxRegisterLogCallback(context, &vxHelperLogCallback, vx_false_e);
    Mat src = imread(SRC_IMG_NAME);
    CHECK_NOT_NULL(src.data, "imread");
    resize(src, src, Size(IMG_WIDTH, IMG_HEIGHT));
    cvtColor(src, src, CV_RGB2GRAY);
    for (i = 0; i < 1; i++) // single pass; raise the bound to repeat the tests
    {
        Mat result_cv(IMG_HEIGHT, IMG_WIDTH, CV_8UC1);
        Mat result_vx(IMG_HEIGHT, IMG_WIDTH, CV_8UC1);

        printf("Start to run not_box3x3_graph()\n");
        not_box3x3_cv(src.clone(), result_cv);
        status = not_box3x3_graph(context, src.clone(), result_vx);
        printf("Return from not_box3x3_graph() result_vx: %d\n", status);
        if (verify_result(result_cv, result_vx))
            printf("Verify passed!!\n");
        else
            printf("Verify failed!!\n");
        printf("\n");
        //imwrite("not_box3x3_cv.jpg", result_cv);
        //imwrite("not_box3x3_vx.jpg", result_vx);

        printf("Start to run not_not_graph()\n");
        not_not_cv(src.clone(), result_cv);
        status = not_not_graph(context, src.clone(), result_vx);
        printf("Return from not_not_graph() result_vx: %d\n", status);
        if (verify_result(result_cv, result_vx))
            printf("Verify passed!!\n");
        else
            printf("Verify failed!!\n");
        printf("\n");

        printf("Start to run not_graph()\n");
        not_cv(src.clone(), result_cv);
        status = not_graph(context, src.clone(), result_vx);
        printf("Return from not_graph() result_vx: %d\n", status);
        if (verify_result(result_cv, result_vx))
            printf("Verify passed!!\n");
        else
            printf("Verify failed!!\n");
        printf("\n");
        //imwrite("result_cv.jpg", result_cv);
        //imwrite("result_vx.jpg", result_vx);
    }
    status = vxReleaseContext(&context);
    CHECK_STATUS(status, "vxReleaseContext");
    printf("%s done!!\n", argv[0]);
    return 0;
}
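/* verify_result() is local to this test and not shown. A sketch of a
 * byte-exact comparison (real harnesses often tolerate small border
 * differences between OpenCV and OpenVX kernels):
 */
static bool verify_result(const cv::Mat &expected, const cv::Mat &actual)
{
    if (expected.size() != actual.size() || expected.type() != actual.type())
        return false;
    cv::Mat diff;
    cv::absdiff(expected, actual, diff); // per-pixel |expected - actual|
    return cv::countNonZero(diff) == 0;  // identical iff no pixel differs
}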