vx_status vxuHarrisScore(vx_context context, vx_image gx, vx_image gy,
                         vx_scalar sensitivity, vx_scalar block_size, vx_image score)
{
    vx_status status = VX_FAILURE;
    vx_graph graph = vxCreateGraph(context);
    if (graph)
    {
        vx_node node = vxHarrisScoreNode(graph, gx, gy, sensitivity, block_size, score);
        if (node)
        {
            status = vxVerifyGraph(graph);
            if (status == VX_SUCCESS)
            {
                status = vxProcessGraph(graph);
            }
            vxReleaseNode(&node);
        }
        vxClearLog((vx_reference)graph);
        vxReleaseGraph(&graph);
    }
    return status;
}
static VALUE Graph_verify(VALUE self)
{
    vx_graph graph = 0;
    vx_status status = VX_FAILURE;
    Check_Type(self, T_DATA);
    graph = (vx_graph)DATA_PTR(self);
    status = vxVerifyGraph(graph);
    REXT_PRINT("status = %d\n", status);
    switch (status)
    {
        case VX_SUCCESS:
            break;
        default:
            rb_raise(rb_eException, "Verify failed.");
            break;
    }
    return Qnil;
}
//! [vxu]
vx_status vxuXYZ(vx_context context, vx_image input, vx_uint32 value, vx_image output, vx_array temp)
{
    vx_status status = VX_FAILURE;
    vx_graph graph = vxCreateGraph(context);
    if (graph)
    {
        vx_node node = vxXYZNode(graph, input, value, output, temp);
        if (node)
        {
            status = vxVerifyGraph(graph);
            if (status == VX_SUCCESS)
            {
                status = vxProcessGraph(graph);
            }
            vxReleaseNode(&node);
        }
        vxReleaseGraph(&graph);
    }
    return status;
}
vx_status vxuLaplacian3x3(vx_context context, vx_image input, vx_image output)
{
    vx_status status = VX_FAILURE;
    vx_graph graph = vxCreateGraph(context);
    if (graph)
    {
        vx_node node = vxLaplacian3x3Node(graph, input, output);
        if (node)
        {
            status = vxVerifyGraph(graph);
            if (status == VX_SUCCESS)
            {
                status = vxProcessGraph(graph);
            }
            vxReleaseNode(&node);
        }
        vxClearLog((vx_reference)graph);
        vxReleaseGraph(&graph);
    }
    return status;
}
vx_status vxuNonMaxSuppression(vx_context context, vx_image mag, vx_image phase, vx_image edge)
{
    vx_status status = VX_FAILURE; /* start from failure so an unsuccessful graph creation is reported */
    vx_graph graph = vxCreateGraph(context);
    if (graph)
    {
        vx_node node = vxNonMaxSuppressionNode(graph, mag, phase, edge);
        if (node)
        {
            status = vxVerifyGraph(graph);
            if (status == VX_SUCCESS)
            {
                status = vxProcessGraph(graph);
            }
            vxReleaseNode(&node);
        }
        vxClearLog((vx_reference)graph);
        vxReleaseGraph(&graph);
    }
    return status;
}
vx_status vxuSobelMxN(vx_context context, vx_image input, vx_scalar win, vx_image gx, vx_image gy)
{
    vx_status status = VX_FAILURE;
    vx_graph graph = vxCreateGraph(context);
    if (graph)
    {
        vx_node node = vxSobelMxNNode(graph, input, win, gx, gy);
        if (node)
        {
            status = vxVerifyGraph(graph);
            if (status == VX_SUCCESS)
            {
                status = vxProcessGraph(graph);
            }
            vxReleaseNode(&node);
        }
        vxClearLog((vx_reference)graph);
        vxReleaseGraph(&graph);
    }
    return status;
}
vx_status vxuImageLister(vx_context context, vx_image input, vx_array arr, vx_scalar num_points)
{
    vx_status status = VX_FAILURE;
    vx_graph graph = vxCreateGraph(context);
    if (graph)
    {
        vx_node node = vxImageListerNode(graph, input, arr, num_points);
        if (node)
        {
            status = vxVerifyGraph(graph);
            if (status == VX_SUCCESS)
            {
                status = vxProcessGraph(graph);
            }
            vxReleaseNode(&node);
        }
        vxClearLog((vx_reference)graph);
        vxReleaseGraph(&graph);
    }
    return status;
}
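/* A minimal usage sketch for the single-node vxu wrappers above: create a
 * context and a pair of images, call one wrapper, and release everything.
 * The image size, format, and the function name below are placeholder
 * assumptions chosen for illustration only. */
void example_vxu_usage(void)
{
    vx_context context = vxCreateContext();
    if (vxGetStatus((vx_reference)context) == VX_SUCCESS)
    {
        vx_image input  = vxCreateImage(context, 640, 480, VX_DF_IMAGE_U8);
        vx_image output = vxCreateImage(context, 640, 480, VX_DF_IMAGE_U8);
        /* Each vxu call builds, verifies, runs, and then destroys a one-node graph. */
        vx_status status = vxuLaplacian3x3(context, input, output);
        if (status != VX_SUCCESS)
        {
            printf("vxuLaplacian3x3 failed: %d\n", status);
        }
        vxReleaseImage(&input);
        vxReleaseImage(&output);
    }
    vxReleaseContext(&context);
}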
int main(void)
{
    vx_context context = vxCreateContext();
    vx_uint8 value = 8;
    vx_graph graph = vxCreateGraph(context);
    vx_image images[] = {
        vxCreateUniformImage(context, 640, 480, VX_DF_IMAGE_U8, &value),
        vxCreateImage(context, 640, 480, VX_DF_IMAGE_U8)
    };
    vx_image intermediate_images[] = {
        vxCreateVirtualImage(graph, 0, 0, VX_DF_IMAGE_S16)
    };
    /* Create the depth conversion nodes */
    vx_uint32 uint_value2 = 0;
    vx_scalar vx_value2 = vxCreateScalar(context, VX_TYPE_INT32, &uint_value2);
    vx_uint32 uint_value1 = 0;
    vx_scalar vx_value1 = vxCreateScalar(context, VX_TYPE_INT32, &uint_value1);
    /* The order in which these two nodes are created should not matter. */
    vxConvertDepthNode(graph, intermediate_images[0], images[1], VX_CONVERT_POLICY_SATURATE, vx_value2);
    vxConvertDepthNode(graph, images[0], intermediate_images[0], VX_CONVERT_POLICY_SATURATE, vx_value1);
    vx_status status = vxVerifyGraph(graph);
    if (status == VX_SUCCESS)
    {
        status = vxProcessGraph(graph);
    }
    if (status != VX_SUCCESS)
    {
        fprintf(stderr, "badness\n");
        abort();
    }
    exit(0);
}
vx_status vxuEuclideanNonMax(vx_context context, vx_image input, vx_scalar strength_thresh,
                             vx_scalar min_distance, vx_image output)
{
    vx_status status = VX_FAILURE;
    vx_graph graph = vxCreateGraph(context);
    if (graph)
    {
        vx_node node = vxEuclideanNonMaxNode(graph, input, strength_thresh, min_distance, output);
        if (node)
        {
            status = vxVerifyGraph(graph);
            if (status == VX_SUCCESS)
            {
                status = vxProcessGraph(graph);
            }
            vxReleaseNode(&node);
        }
        vxClearLog((vx_reference)graph);
        vxReleaseGraph(&graph);
    }
    return status;
}
/*! \brief The graph factory example.
 * \ingroup group_example
 */
int main(int argc, char *argv[])
{
    vx_status status = VX_SUCCESS;
    vx_context context = vxCreateContext();
    if (context)
    {
        vx_image images[] = {
            vxCreateImage(context, 640, 480, VX_DF_IMAGE_U8),
            vxCreateImage(context, 640, 480, VX_DF_IMAGE_S16),
        };
        vx_graph graph = vxGraphFactory(context, VX_GRAPH_FACTORY_EDGE);
        if (graph)
        {
            vx_uint32 p, num = 0;
            status |= vxQueryGraph(graph, VX_GRAPH_ATTRIBUTE_NUMPARAMETERS, &num, sizeof(num));
            if (status == VX_SUCCESS)
            {
                printf("There are %u parameters to this graph!\n", num);
                for (p = 0; p < num; p++)
                {
                    vx_parameter param = vxGetGraphParameterByIndex(graph, p);
                    if (param)
                    {
                        vx_enum dir = 0;
                        vx_enum type = 0;
                        status |= vxQueryParameter(param, VX_PARAMETER_ATTRIBUTE_DIRECTION, &dir, sizeof(dir));
                        status |= vxQueryParameter(param, VX_PARAMETER_ATTRIBUTE_TYPE, &type, sizeof(type));
                        printf("graph.parameter[%u] dir:%d type:%08x\n", p, dir, type);
                        vxReleaseParameter(&param);
                    }
                    else
                    {
                        printf("Invalid parameter retrieved from graph!\n");
                    }
                }
                status |= vxSetGraphParameterByIndex(graph, 0, (vx_reference)images[0]);
                status |= vxSetGraphParameterByIndex(graph, 1, (vx_reference)images[1]);
            }
            status |= vxVerifyGraph(graph);
            if (status == VX_SUCCESS)
            {
                status = vxProcessGraph(graph);
                if (status == VX_SUCCESS)
                {
                    printf("Ran Graph!\n");
                }
            }
            vxReleaseGraph(&graph);
        }
        else
        {
            printf("Failed to create graph!\n");
        }
        vxReleaseContext(&context);
    }
    else
    {
        printf("failed to create context!\n");
    }
    return status;
}
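/* The example above assumes a vxGraphFactory() helper that returns a pre-built
 * graph whose inputs and outputs are exposed as graph parameters. The sketch
 * below shows one plausible way to assemble the VX_GRAPH_FACTORY_EDGE case
 * (Gaussian blur -> Sobel -> magnitude); the node chain, the placeholder
 * images, and the function name are illustrative assumptions, not the actual
 * factory implementation referenced by the example. */
static vx_graph example_edge_graph_factory(vx_context context)
{
    vx_graph graph = vxCreateGraph(context);
    if (vxGetStatus((vx_reference)graph) == VX_SUCCESS)
    {
        /* Placeholder and intermediate images. The first and last entries are
         * only placeholders; the application overwrites them later through
         * vxSetGraphParameterByIndex(). */
        vx_image imgs[] = {
            vxCreateVirtualImage(graph, 0, 0, VX_DF_IMAGE_U8),  /* [0] source placeholder */
            vxCreateVirtualImage(graph, 0, 0, VX_DF_IMAGE_U8),  /* [1] blurred */
            vxCreateVirtualImage(graph, 0, 0, VX_DF_IMAGE_S16), /* [2] gradient x */
            vxCreateVirtualImage(graph, 0, 0, VX_DF_IMAGE_S16), /* [3] gradient y */
            vxCreateVirtualImage(graph, 0, 0, VX_DF_IMAGE_S16), /* [4] magnitude placeholder */
        };
        vx_node nodes[] = {
            vxGaussian3x3Node(graph, imgs[0], imgs[1]),
            vxSobel3x3Node(graph, imgs[1], imgs[2], imgs[3]),
            vxMagnitudeNode(graph, imgs[2], imgs[3], imgs[4]),
        };
        /* Expose the source of the first node and the output of the last node
         * as graph parameters 0 and 1, matching the order the example expects. */
        vx_parameter params[] = {
            vxGetParameterByIndex(nodes[0], 0),
            vxGetParameterByIndex(nodes[2], 2),
        };
        vx_uint32 i;
        vxAddParameterToGraph(graph, params[0]);
        vxAddParameterToGraph(graph, params[1]);
        for (i = 0; i < dimof(params); i++) vxReleaseParameter(&params[i]);
        for (i = 0; i < dimof(nodes); i++) vxReleaseNode(&nodes[i]);
        for (i = 0; i < dimof(imgs); i++) vxReleaseImage(&imgs[i]);
    }
    return graph;
}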
////////
// main() has all the OpenVX application code for this exercise.
// Command-line usage:
//   % solution_exercise3 [<video-sequence>|<camera-device-number>]
// When neither a video sequence nor a camera device number is specified,
// it defaults to the video sequence in "PETS09-S1-L1-View001.avi".
int main( int argc, char * argv[] )
{
    // Get the default video sequence when nothing is specified on the command line and
    // instantiate the OpenCV GUI module for reading input RGB images and displaying
    // the image with the OpenVX results.
    const char * video_sequence = argv[1];
    CGuiModule gui( video_sequence );

    // Try to grab the first video frame from the sequence using cv::VideoCapture
    // and check whether a video frame is available.
    if( !gui.Grab() )
    {
        printf( "ERROR: input has no video\n" );
        return 1;
    }

    ////////
    // Set the application configuration parameters. Note that the input video
    // sequence is an 8-bit RGB image with dimensions given by gui.GetWidth()
    // and gui.GetHeight(). The parameters for the tensors are:
    //   tensor_dims                   - tensor dimensions [<width> x <height> x 3]
    //   tensor_input_fixed_point_pos  - fixed-point position for the input tensor
    //   tensor_output_fixed_point_pos - fixed-point position for the output tensor
    vx_uint32 width  = gui.GetWidth();
    vx_uint32 height = gui.GetHeight();
    vx_size tensor_dims[3] = { width, height, 3 };  // 3 channels (RGB)
    vx_uint8 tensor_input_fixed_point_pos  = 5;     // input [-128..127] maps to -4..3.96875
    vx_uint8 tensor_output_fixed_point_pos = 7;     // output [-1..1] maps to -128..128

    ////////
    // Create the OpenVX context, make sure the returned context is valid, and
    // register log_callback to receive messages from the OpenVX framework.
    vx_context context = vxCreateContext();
    ERROR_CHECK_OBJECT( context );
    vxRegisterLogCallback( context, log_callback, vx_false_e );

    ////////
    // Register user kernels with the context.
    //
    // TODO:********
    //   1. Register the user kernel with the context by calling your implementation of "registerUserKernel()".
    ERROR_CHECK_STATUS( registerUserKernel( context ) );

    ////////
    // Create OpenVX tensor objects for input and output.
    //
    // TODO:********
    //   1. Create tensor objects using tensor_dims, tensor_input_fixed_point_pos, and
    //      tensor_output_fixed_point_pos.
    vx_tensor input_tensor  = vxCreateTensor( context, 3, tensor_dims, VX_TYPE_INT16, tensor_input_fixed_point_pos );
    vx_tensor output_tensor = vxCreateTensor( context, 3, tensor_dims, VX_TYPE_INT16, tensor_output_fixed_point_pos );
    ERROR_CHECK_OBJECT( input_tensor );
    ERROR_CHECK_OBJECT( output_tensor );

    ////////
    // Create, build, and verify the graph with the user kernel node.
    //
    // TODO:********
    //   1. Build a graph with just one node created using userTensorCosNode().
    vx_graph graph = vxCreateGraph( context );
    ERROR_CHECK_OBJECT( graph );
    vx_node cos_node = userTensorCosNode( graph, input_tensor, output_tensor );
    ERROR_CHECK_OBJECT( cos_node );
    ERROR_CHECK_STATUS( vxReleaseNode( &cos_node ) );
    ERROR_CHECK_STATUS( vxVerifyGraph( graph ) );

    ////////
    // Process the video sequence frame by frame until the end of the sequence or until aborted.
    cv::Mat bgrMatForOutputDisplay( height, width, CV_8UC3 );
    for( int frame_index = 0; !gui.AbortRequested(); frame_index++ )
    {
        ////////
        // Copy the input RGB frame from OpenCV into input_tensor with UINT8 to Q10.5 (INT16) conversion.
        // In order to do this, use the vxMapTensorPatch API (see "vx_ext_amd.h").
        //
        // TODO:********
        //   1. Use the vxMapTensorPatch API to access the input tensor object for writing.
        //   2. Copy UINT8 data from the OpenCV RGB image into the tensor object.
        //   3. Use the vxUnmapTensorPatch API to return control of the buffer back to the framework.
        vx_uint8 * cv_rgb_image_buffer = gui.GetBuffer();
        vx_size rgb_stride             = gui.GetStride();
        vx_size zeros[3]               = { 0 };
        vx_size tensor_stride[3];
        vx_map_id map_id;
        vx_uint8 * buf;
        ERROR_CHECK_STATUS( vxMapTensorPatch( input_tensor, 3, zeros, tensor_dims, &map_id, tensor_stride,
                                              (void **)&buf, VX_WRITE_ONLY, VX_MEMORY_TYPE_HOST, 0 ) );
        for( vx_size c = 0; c < 3; c++ )
        {
            for( vx_size y = 0; y < height; y++ )
            {
                const vx_uint8 * img = cv_rgb_image_buffer + y * rgb_stride + c;
                vx_int16 * inp = (vx_int16 *)(buf + y * tensor_stride[1] + c * tensor_stride[2]);
                for( vx_size x = 0; x < width; x++ )
                {
                    // convert 0..255 to the Q10.5 [-4..3.96875 range] fixed-point format
                    inp[x] = (vx_int16)img[x * 3] - 128;
                }
            }
        }
        ERROR_CHECK_STATUS( vxUnmapTensorPatch( input_tensor, map_id ) );

        ////////
        // Now that the input tensor is ready, just run the graph.
        //
        // TODO:********
        //   1. Call vxProcessGraph to execute the tensor_cos kernel in the graph.
        ERROR_CHECK_STATUS( vxProcessGraph( graph ) );

        ////////
        // Display the output tensor object as an RGB image.
        //
        // TODO:********
        //   1. Use the vxMapTensorPatch API to access the output tensor object for reading.
        //   2. Copy the tensor object data into the OpenCV RGB image.
        //   3. Use the vxUnmapTensorPatch API to return control of the buffer back to the framework.
        ERROR_CHECK_STATUS( vxMapTensorPatch( output_tensor, 3, zeros, tensor_dims, &map_id, tensor_stride,
                                              (void **)&buf, VX_READ_ONLY, VX_MEMORY_TYPE_HOST, 0 ) );
        vx_uint8 * cv_bgr_image_buffer = bgrMatForOutputDisplay.data;
        vx_size bgr_stride             = bgrMatForOutputDisplay.step;
        for( vx_size c = 0; c < 3; c++ )
        {
            for( vx_size y = 0; y < height; y++ )
            {
                const vx_int16 * out = (const vx_int16 *)(buf + y * tensor_stride[1] + c * tensor_stride[2]);
                vx_uint8 * img = cv_bgr_image_buffer + y * bgr_stride + (2 - c); // (2 - c) for RGB to BGR conversion
                for( vx_size x = 0; x < width; x++ )
                {
                    // scale and convert the Q8.7 [-1..1 range] fixed-point format to 0..255 with saturation
                    vx_int16 value = out[x] + 128;
                    value = value > 255 ? 255 : value; // saturation needed
                    img[x * 3] = (vx_uint8)value;
                }
            }
        }
#if ENABLE_DISPLAY
        cv::imshow( "Cosine", bgrMatForOutputDisplay );
#endif
        ERROR_CHECK_STATUS( vxUnmapTensorPatch( output_tensor, map_id ) );

        ////////
        // Display the results and grab the next input RGB frame for the next iteration.
        char text[128];
        sprintf( text, "Keyboard ESC/Q-Quit SPACE-Pause [FRAME %d] [fixed_point_pos input:%d output:%d]",
                 frame_index, tensor_input_fixed_point_pos, tensor_output_fixed_point_pos );
        gui.DrawText( 0, 16, text );
        gui.Show();
        if( !gui.Grab() )
        {
            // Terminate the processing loop if the end of the sequence is detected.
            gui.WaitForKey();
            break;
        }
    }

    ////////
    // To release an OpenVX object, call the vxRelease<Object> API, which takes a pointer to the object.
    // If the release operation is successful, the OpenVX framework resets the object to NULL.
    //
    // TODO:****
    //   1. Release the graph and tensor objects.
    ERROR_CHECK_STATUS( vxReleaseGraph( &graph ) );
    ERROR_CHECK_STATUS( vxReleaseTensor( &input_tensor ) );
    ERROR_CHECK_STATUS( vxReleaseTensor( &output_tensor ) );
    ERROR_CHECK_STATUS( vxReleaseContext( &context ) );
    return 0;
}
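/* The exercise above relies on two error-checking macros and a log callback
 * that are not shown. The definitions below are a plausible sketch in the
 * style of the tutorial framework (the exact originals may differ): they
 * abort on the first failure and print any messages the framework logs. */
#define ERROR_CHECK_STATUS( call )                                              \
    {                                                                           \
        vx_status status_ = (call);                                             \
        if( status_ != VX_SUCCESS )                                             \
        {                                                                       \
            printf( "ERROR: failed with status = (%d) at " __FILE__ "#%d\n",    \
                    status_, __LINE__ );                                        \
            exit( 1 );                                                          \
        }                                                                       \
    }

#define ERROR_CHECK_OBJECT( ref )                                               \
    {                                                                           \
        vx_status status_ = vxGetStatus( (vx_reference)(ref) );                 \
        if( status_ != VX_SUCCESS )                                             \
        {                                                                       \
            printf( "ERROR: failed with status = (%d) at " __FILE__ "#%d\n",    \
                    status_, __LINE__ );                                        \
            exit( 1 );                                                          \
        }                                                                       \
    }

/* Log callback registered with vxRegisterLogCallback() in main(). */
static void VX_CALLBACK log_callback( vx_context context, vx_reference ref,
                                      vx_status status, const vx_char string[] )
{
    printf( "LOG: [ status = %d ] %s\n", status, string );
    fflush( stdout );
}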
int main(int argc, char *argv[])
{
    vx_status status = VX_FAILURE;
    vx_context context = vxCreateContext();
    if (argc < 2)
    {
        usage(argv[0]);
        goto relCtx;
    }
    vx_char *srcfilename = argv[1];
    printf("src img: %s\n", srcfilename);
    FILE *fp = fopen(srcfilename, "r");
    if (!fp)
    {
        goto relCtx;
    }
    char pgmstr[1024];
    unsigned int n;
    n = fread(pgmstr, 1, sizeof(pgmstr), fp);
    if (n != sizeof(pgmstr))
    {
        goto relClose;
    }
    const char delim[] = "\n"; /* strtok expects a NUL-terminated delimiter string */
    const char *token = NULL;
    unsigned int width, height;
    // PGM P5 magic string
    token = strtok(pgmstr, delim);
    // PGM author
    token = strtok(NULL, delim);
    // PGM image size
    token = strtok(NULL, delim);
    sscanf(token, "%u %u", &width, &height);
    printf("width:%u height:%u\n", width, height);
    status = vxGetStatus((vx_reference)context);
    if (status != VX_SUCCESS)
    {
        fprintf(stderr, "error: vxCreateContext\n");
        goto relClose;
    }
    vx_rectangle_t rect = {1, 1, width + 1, height + 1};
    vx_uint32 i = 0;
    vx_image images[] = {
        vxCreateImage(context, width + 2, height + 2, VX_DF_IMAGE_U8), // 0: input
        vxCreateImageFromROI(images[0], &rect),                        // 1: ROI input
        vxCreateImage(context, width, height, VX_DF_IMAGE_U8),         // 2: box
        vxCreateImage(context, width, height, VX_DF_IMAGE_U8),         // 3: gaussian
        vxCreateImage(context, width, height, VX_DF_IMAGE_U8),         // 4: alpha
        vxCreateImage(context, width, height, VX_DF_IMAGE_S16),        // 5: add
    };
    vx_float32 a = 0.5f;
    vx_scalar alpha = vxCreateScalar(context, VX_TYPE_FLOAT32, &a);
    status |= vxLoadKernels(context, "openvx-tiling");
    status |= vxLoadKernels(context, "openvx-debug");
    if (status != VX_SUCCESS)
    {
        fprintf(stderr, "error: vxLoadKernels %d\n", status);
        goto relImg;
    }
    vx_graph graph = vxCreateGraph(context);
    status = vxGetStatus((vx_reference)graph); /* check the graph, not the context */
    if (status != VX_SUCCESS)
    {
        fprintf(stderr, "error: vxGetStatus\n");
        goto relKern;
    }
    ax_node_t axnodes[] = {
        { vxFReadImageNode(graph, srcfilename, images[1]),          "Read" },
        { vxTilingBoxNode(graph, images[1], images[2], 5, 5),       "Box" },
        { vxFWriteImageNode(graph, images[2], "ot_box.pgm"),        "Write" },
        { vxTilingGaussianNode(graph, images[1], images[3]),        "Gaussian" },
        { vxFWriteImageNode(graph, images[3], "ot_gauss.pgm"),      "Write" },
        { vxTilingAlphaNode(graph, images[1], alpha, images[4]),    "Alpha" },
        { vxFWriteImageNode(graph, images[4], "ot_alpha.pgm"),      "Write" },
        { vxTilingAddNode(graph, images[1], images[4], images[5]),  "Add" },
        { vxFWriteImageNode(graph, images[5], "ot_add.pgm"),        "Write" },
    };
    for (i = 0; i < dimof(axnodes); i++)
    {
        if (axnodes[i].node == 0)
        {
            fprintf(stderr, "error: Failed to create node[%u]\n", i);
            status = VX_ERROR_INVALID_NODE;
            goto relNod;
        }
    }
    status = vxVerifyGraph(graph);
    if (status != VX_SUCCESS)
    {
        fprintf(stderr, "error: vxVerifyGraph %d\n", status);
        goto relNod;
    }
    status = vxProcessGraph(graph);
    if (status != VX_SUCCESS)
    {
        fprintf(stderr, "error: vxProcessGraph %d\n", status);
        goto relNod;
    }
    // performance timings
    vx_perf_t perf_node;
    vx_perf_t perf_graph;
    vxQueryGraph(graph, VX_GRAPH_ATTRIBUTE_PERFORMANCE, &perf_graph, sizeof(perf_graph));
    axPrintPerf("Graph", &perf_graph);
    for (i = 0; i < dimof(axnodes); ++i)
    {
        vxQueryNode(axnodes[i].node, VX_NODE_ATTRIBUTE_PERFORMANCE, &perf_node, sizeof(perf_node));
        axPrintPerf(axnodes[i].name, &perf_node);
    }
relNod:
    for (i = 0; i < dimof(axnodes); i++)
    {
        vxReleaseNode(&axnodes[i].node);
    }
    vxReleaseGraph(&graph);
relKern:
relImg:
    for (i = 0; i < dimof(images); i++)
    {
        vxReleaseImage(&images[i]);
    }
relClose:
    fclose(fp);
relCtx:
    vxReleaseContext(&context);
    printf("%s::main() returns = %d\n", argv[0], status);
    return (int)status;
}
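/* The tiling example above uses a few small helpers that are not shown:
 * dimof(), the ax_node_t bookkeeping struct, and axPrintPerf(). The
 * definitions below are plausible sketches consistent with how they are
 * used; the originals may differ. */
#define dimof(x) (sizeof(x) / sizeof((x)[0]))

typedef struct _ax_node_t {
    vx_node node;       /* handle returned by the node-creation call */
    const char *name;   /* label printed next to the per-node timing */
} ax_node_t;

static void axPrintPerf(const char *name, vx_perf_t *perf)
{
    /* vx_perf_t counters are in nanoseconds; report the average per run. */
    printf("%s: avg %.3f ms over %llu runs\n", name,
           (double)perf->avg / 1000000.0, (unsigned long long)perf->num);
}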
static vx_status VX_CALLBACK vxHarrisInitializer(vx_node node, vx_reference parameters[], vx_uint32 num)
{
    vx_status status = VX_FAILURE;
    if (num == dimof(harris_kernel_params))
    {
        vx_image src = (vx_image)parameters[0];
        vx_scalar str = (vx_scalar)parameters[1];
        vx_scalar min = (vx_scalar)parameters[2];
        vx_scalar sen = (vx_scalar)parameters[3];
        vx_scalar win = (vx_scalar)parameters[4];
        vx_scalar blk = (vx_scalar)parameters[5];
        vx_array arr = (vx_array)parameters[6];
        vx_scalar num_corners = (vx_scalar)parameters[7];
        vx_context c = vxGetContext((vx_reference)node);
        vx_graph g = vxCreateGraph(c);
        vxLoadKernels(c, "openvx-extras");
        vxLoadKernels(c, "openvx-debug");
        if (g)
        {
            vx_uint32 i = 0;
            vx_int32 ds = 4;
            vx_scalar shift = vxCreateScalar(c, VX_TYPE_INT32, &ds);
            vx_image virts[] = {
                vxCreateVirtualImage(g, 0, 0, VX_DF_IMAGE_VIRT), // Gx
                vxCreateVirtualImage(g, 0, 0, VX_DF_IMAGE_VIRT), // Gy
                vxCreateVirtualImage(g, 0, 0, VX_DF_IMAGE_VIRT), // Score
                vxCreateVirtualImage(g, 0, 0, VX_DF_IMAGE_VIRT), // Suppressed
                vxCreateVirtualImage(g, 0, 0, VX_DF_IMAGE_U8),   // Shifted Suppressed Log10
            };
            vx_node nodes[] = {
                vxSobelMxNNode(g, src, win, virts[0], virts[1]),
                vxHarrisScoreNode(g, virts[0], virts[1], sen, blk, virts[2]),
                vxEuclideanNonMaxNode(g, virts[2], str, min, virts[3]),
                vxImageListerNode(g, virts[3], arr, num_corners),
#if defined(OPENVX_DEBUGGING)
                vxConvertDepthNode(g, virts[3], virts[4], VX_CONVERT_POLICY_WRAP, shift),
                vxFWriteImageNode(g, virts[4], "oharris_strength_power_up4.pgm"),
#endif
            };
            status = VX_SUCCESS;
            status |= vxAddParameterToGraphByIndex(g, nodes[0], 0); // src
            status |= vxAddParameterToGraphByIndex(g, nodes[2], 1); // str
            status |= vxAddParameterToGraphByIndex(g, nodes[2], 2); // min
            status |= vxAddParameterToGraphByIndex(g, nodes[1], 2); // sen
            status |= vxAddParameterToGraphByIndex(g, nodes[0], 1); // win
            status |= vxAddParameterToGraphByIndex(g, nodes[1], 3); // blk
            status |= vxAddParameterToGraphByIndex(g, nodes[3], 1); // arr
            status |= vxAddParameterToGraphByIndex(g, nodes[3], 2); // num_corners
            for (i = 0; i < dimof(nodes); i++)
            {
                vxReleaseNode(&nodes[i]);
            }
            for (i = 0; i < dimof(virts); i++)
            {
                vxReleaseImage(&virts[i]);
            }
            vxReleaseScalar(&shift);
            status |= vxVerifyGraph(g);
            VX_PRINT(VX_ZONE_INFO, "Status from Child Graph = %d\n", status);
            if (status == VX_SUCCESS)
            {
                status = vxSetChildGraphOfNode(node, g);
            }
            else
            {
                vxReleaseGraph(&g);
            }
        }
    }
    return status;
}
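/* A node that stores a child graph with vxSetChildGraphOfNode() needs a
 * matching deinitializer so the graph is released when the kernel instance is
 * destroyed. The sketch below assumes the sample implementation also exposes
 * vxGetChildGraphOfNode(); if it does not, the graph handle would have to be
 * kept in node-local storage instead. */
static vx_status VX_CALLBACK vxHarrisDeinitializer(vx_node node, const vx_reference *parameters, vx_uint32 num)
{
    vx_status status = VX_FAILURE;
    (void)parameters;
    if (num == dimof(harris_kernel_params))
    {
        vx_graph graph = vxGetChildGraphOfNode(node);
        vxReleaseGraph(&graph);
        /* Clear the association so the node does not keep a dangling handle. */
        status = vxSetChildGraphOfNode(node, 0);
    }
    return status;
}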
static vx_status VX_CALLBACK vxHalfscaleGaussianInitializer(vx_node node, const vx_reference *parameters, vx_uint32 num)
{
    vx_status status = VX_ERROR_INVALID_PARAMETERS;
    if (num == 3)
    {
        vx_image input = (vx_image)parameters[0];
        vx_image output = (vx_image)parameters[1];
        vx_int32 kernel_size = 3;
        vx_convolution convolution = 0;
        vx_context context = vxGetContext((vx_reference)node);
        vx_graph graph = vxCreateGraph(context);
        if (vxGetStatus((vx_reference)graph) == VX_SUCCESS)
        {
            vx_uint32 i;
            /* We have a child graph; we want to make sure the parent graph is
             * recognized as a valid scope for the sake of virtual image parameters. */
            graph->parentGraph = node->graph;
            vxReadScalarValue((vx_scalar)parameters[2], &kernel_size);
            if (kernel_size == 3 || kernel_size == 5)
            {
                if (kernel_size == 5)
                {
                    convolution = vxCreateGaussian5x5Convolution(context);
                }
                if (kernel_size == 3 || convolution)
                {
                    vx_image virt = vxCreateVirtualImage(graph, 0, 0, VX_DF_IMAGE_U8);
                    vx_node nodes[] = {
                        kernel_size == 3 ? vxGaussian3x3Node(graph, input, virt)
                                         : vxConvolveNode(graph, input, convolution, virt),
                        vxScaleImageNode(graph, virt, output, VX_INTERPOLATION_TYPE_NEAREST_NEIGHBOR),
                    };
                    vx_border_mode_t borders;
                    vxQueryNode(node, VX_NODE_ATTRIBUTE_BORDER_MODE, &borders, sizeof(borders));
                    for (i = 0; i < dimof(nodes); i++)
                    {
                        vxSetNodeAttribute(nodes[i], VX_NODE_ATTRIBUTE_BORDER_MODE, &borders, sizeof(borders));
                    }
                    status = VX_SUCCESS;
                    status |= vxAddParameterToGraphByIndex(graph, nodes[0], 0); /* input image */
                    status |= vxAddParameterToGraphByIndex(graph, nodes[1], 1); /* output image */
                    status |= vxAddParameterToGraphByIndex(graph, node, 2);     /* gradient size - refer to self to quiet the sub-graph validator */
                    status |= vxVerifyGraph(graph);
                    /* release our references; the graph will hold its own */
                    for (i = 0; i < dimof(nodes); i++)
                    {
                        vxReleaseNode(&nodes[i]);
                    }
                    if (convolution)
                        vxReleaseConvolution(&convolution);
                    vxReleaseImage(&virt);
                    status |= vxSetChildGraphOfNode(node, graph);
                }
            }
            vxReleaseGraph(&graph);
        }
    }
    return status;
}
/*!
 * \brief An example of a super resolution algorithm.
 * \ingroup group_example
 */
int example_super_resolution(int argc, char *argv[])
{
    vx_status status = VX_SUCCESS;
    vx_uint32 image_index = 0, max_num_images = 4;
    vx_uint32 width = 640;
    vx_uint32 i = 0;
    vx_uint32 winSize = 32;
    vx_uint32 height = 480;
    vx_int32 sens_thresh = 20;
    vx_float32 alpha = 0.2f;
    vx_float32 tau = 0.5f;
    vx_enum criteria = VX_TERM_CRITERIA_BOTH;
    // optical flow (LK) parameters
    vx_float32 epsilon = 0.01f;
    vx_int32 num_iterations = 10;
    vx_bool use_initial_estimate = vx_true_e;
    vx_int32 min_distance = 5;
    // Harris corner parameters
    vx_float32 sensitivity = 0.04f;
    vx_int32 gradient_size = 3;
    vx_int32 block_size = 3;
    vx_context context = vxCreateContext();
    vx_scalar alpha_s = vxCreateScalar(context, VX_TYPE_FLOAT32, &alpha);
    vx_scalar tau_s = vxCreateScalar(context, VX_TYPE_FLOAT32, &tau);
    vx_matrix matrix_forward = vxCreateMatrix(context, VX_TYPE_FLOAT32, 3, 3);
    vx_matrix matrix_backwords = vxCreateMatrix(context, VX_TYPE_FLOAT32, 3, 3);
    vx_array old_features = vxCreateArray(context, VX_TYPE_KEYPOINT, 1000);
    vx_array new_features = vxCreateArray(context, VX_TYPE_KEYPOINT, 1000);
    vx_scalar epsilon_s = vxCreateScalar(context, VX_TYPE_FLOAT32, &epsilon);
    vx_scalar num_iterations_s = vxCreateScalar(context, VX_TYPE_INT32, &num_iterations);
    vx_scalar use_initial_estimate_s = vxCreateScalar(context, VX_TYPE_BOOL, &use_initial_estimate);
    vx_scalar min_distance_s = vxCreateScalar(context, VX_TYPE_INT32, &min_distance);
    vx_scalar sensitivity_s = vxCreateScalar(context, VX_TYPE_FLOAT32, &sensitivity);
    vx_scalar sens_thresh_s = vxCreateScalar(context, VX_TYPE_INT32, &sens_thresh);
    vx_scalar num_corners = vxCreateScalar(context, VX_TYPE_SIZE, NULL);
    if (vxGetStatus((vx_reference)context) == VX_SUCCESS)
    {
        vx_image images[] = {
            vxCreateImage(context, width, height, VX_DF_IMAGE_UYVY),        // index 0: input frame
            vxCreateImage(context, width, height, VX_DF_IMAGE_U8),          // index 1: extracted Y channel
            vxCreateImage(context, width * 2, height * 2, VX_DF_IMAGE_U8),  // index 2: scale up to high res
            vxCreateImage(context, width * 2, height * 2, VX_DF_IMAGE_U8),  // index 3: backward warp: transform to the original image
            vxCreateImage(context, width * 2, height * 2, VX_DF_IMAGE_U8),  // index 4: Gaussian blur
            vxCreateImage(context, width, height, VX_DF_IMAGE_U8),          // index 5: scale down
            vxCreateImage(context, width, height, VX_DF_IMAGE_S16),         // index 6: subtract the transformed image from the original moved image
            vxCreateImage(context, width * 2, height * 2, VX_DF_IMAGE_S16), // index 7: scale up the delta image
            vxCreateImage(context, width * 2, height * 2, VX_DF_IMAGE_S16), // index 8: Gaussian blur the delta image
            vxCreateImage(context, width * 2, height * 2, VX_DF_IMAGE_S16), // index 9: forward warp: transform the deltas back to the high res image
            vxCreateImage(context, width * 2, height * 2, VX_DF_IMAGE_U8),  // index 10: accumulated sum
            vxCreateImage(context, width, height, VX_DF_IMAGE_U8),          // index 11: extracted U channel
            vxCreateImage(context, width * 2, height * 2, VX_DF_IMAGE_U8),  // index 12: scale up to high res
            vxCreateImage(context, width, height, VX_DF_IMAGE_U8),          // index 13: extracted V channel
            vxCreateImage(context, width * 2, height * 2, VX_DF_IMAGE_U8),  // index 14: scale up to high res
            vxCreateImage(context, width, height, VX_DF_IMAGE_UYVY),        // index 15: output image
            vxCreateImage(context, width * 2, height * 2, VX_DF_IMAGE_U8),  // index 16: original Y image, scaled
            vxCreateImage(context, width * 2, height * 2, VX_DF_IMAGE_U8),  // index 17: difference image for the last calculation
        };
        vx_pyramid pyramid_new = vxCreatePyramid(context, 4, 2, width, height, VX_DF_IMAGE_U8);
        vx_pyramid pyramid_old = vxCreatePyramid(context, 4, 2, width, height, VX_DF_IMAGE_U8);
        vx_graph graphs[] = {
            vxCreateGraph(context),
            vxCreateGraph(context),
            vxCreateGraph(context),
            vxCreateGraph(context),
        };
        vxLoadKernels(context, "openvx-debug");
        if (vxGetStatus((vx_reference)graphs[0]) == VX_SUCCESS)
        {
            // one iteration of the super resolution calculation
            vxChannelExtractNode(graphs[0], images[0], VX_CHANNEL_Y, images[1]);
            vxScaleImageNode(graphs[0], images[1], images[2], VX_INTERPOLATION_TYPE_BILINEAR);
            vxWarpPerspectiveNode(graphs[0], images[2], matrix_forward, 0, images[3]);
            vxGaussian3x3Node(graphs[0], images[3], images[4]);
            vxScaleImageNode(graphs[0], images[4], images[5], VX_INTERPOLATION_TYPE_BILINEAR);
            vxSubtractNode(graphs[0], images[5], images[16], VX_CONVERT_POLICY_SATURATE, images[6]);
            vxScaleImageNode(graphs[0], images[6], images[7], VX_INTERPOLATION_TYPE_BILINEAR);
            vxGaussian3x3Node(graphs[0], images[7], images[8]);
            vxWarpPerspectiveNode(graphs[0], images[8], matrix_backwords, 0, images[9]);
            vxAccumulateWeightedImageNode(graphs[0], images[9], alpha_s, images[10]);
        }
        if (vxGetStatus((vx_reference)graphs[1]) == VX_SUCCESS)
        {
            // track features into the new frame
            vxChannelExtractNode(graphs[1], images[0], VX_CHANNEL_Y, images[1]);
            vxGaussianPyramidNode(graphs[1], images[1], pyramid_new);
            vxOpticalFlowPyrLKNode(graphs[1], pyramid_old, pyramid_new, old_features, old_features, new_features,
                                   criteria, epsilon_s, num_iterations_s, use_initial_estimate_s, winSize);
        }
        if (vxGetStatus((vx_reference)graphs[2]) == VX_SUCCESS)
        {
            // initial pass: detect features and build the "old" pyramid
            vxChannelExtractNode(graphs[2], images[0], VX_CHANNEL_Y, images[1]);
            vxHarrisCornersNode(graphs[2], images[1], sens_thresh_s, min_distance_s, sensitivity_s,
                                gradient_size, block_size, old_features, num_corners);
            vxGaussianPyramidNode(graphs[2], images[1], pyramid_old);
            vxScaleImageNode(graphs[2], images[1], images[16], VX_INTERPOLATION_TYPE_BILINEAR);
        }
        if (vxGetStatus((vx_reference)graphs[3]) == VX_SUCCESS)
        {
            vxSubtractNode(graphs[3], images[10], images[16], VX_CONVERT_POLICY_SATURATE, images[17]);
            vxAccumulateWeightedImageNode(graphs[3], images[17], tau_s, images[16]);
            vxChannelExtractNode(graphs[3], images[16], VX_CHANNEL_U, images[11]);
            vxScaleImageNode(graphs[3], images[11], images[12], VX_INTERPOLATION_TYPE_BILINEAR); // upscale the U channel
            vxChannelExtractNode(graphs[3], images[0], VX_CHANNEL_V, images[13]);
            vxScaleImageNode(graphs[3], images[13], images[14], VX_INTERPOLATION_TYPE_BILINEAR); // upscale the V channel
            vxChannelCombineNode(graphs[3], images[10], images[12], images[14], 0, images[15]);  // recombine the channels
        }
        status = VX_SUCCESS;
        status |= vxVerifyGraph(graphs[0]);
        status |= vxVerifyGraph(graphs[1]);
        status |= vxVerifyGraph(graphs[2]);
        status |= vxVerifyGraph(graphs[3]);
        if (status == VX_SUCCESS)
        {
            /* read the initial image in */
            status |= vxuFReadImage(context, "c:\\work\\super_res\\superres_1_UYVY.yuv", images[0]);
            /* compute the "old" pyramid */
            status |= vxProcessGraph(graphs[2]);
            /* for each input image, read it in and run graphs[1] and graphs[0] */
            for (image_index = 1; image_index < max_num_images; image_index++)
            {
                char filename[256];
                sprintf(filename, "c:\\work\\super_res\\superres_%d_UYVY.yuv", image_index + 1);
                status |= vxuFReadImage(context, filename, images[0]);
                status |= vxProcessGraph(graphs[1]);
                userCalculatePerspectiveTransformFromLK(matrix_forward, matrix_backwords, old_features, new_features);
                status |= vxProcessGraph(graphs[0]);
            }
            /* run the final graph */
            status |= vxProcessGraph(graphs[3]);
            /* save the output */
            status |= vxuFWriteImage(context, images[15], "superres_UYVY.yuv");
        }
        vxReleaseGraph(&graphs[0]);
        vxReleaseGraph(&graphs[1]);
        vxReleaseGraph(&graphs[2]);
        vxReleaseGraph(&graphs[3]);
        for (i = 0; i < dimof(images); i++)
        {
            vxReleaseImage(&images[i]);
        }
        vxReleasePyramid(&pyramid_new);
        vxReleasePyramid(&pyramid_old);
    }
    vxReleaseMatrix(&matrix_forward);
    vxReleaseMatrix(&matrix_backwords);
    vxReleaseScalar(&alpha_s);
    vxReleaseScalar(&tau_s);
    /* release the context last */
    vxReleaseContext(&context);
    return status;
}