////////
// main() has all the OpenVX application code for this exercise.
// Command-line usage:
//   % exercise3 [<video-sequence>|<camera-device-number>]
// When neither a video sequence nor a camera device number is specified,
// it defaults to the video sequence in "PETS09-S1-L1-View001.avi".
int main( int argc, char * argv[] )
{
    // Get the default video sequence when nothing is specified on the command-line and
    // instantiate the OpenCV GUI module for reading input RGB images and displaying
    // the image with OpenVX results.
    const char * video_sequence = argv[1];
    CGuiModule gui( video_sequence );

    // Try to grab the first video frame from the sequence using cv::VideoCapture
    // and check if a video frame is available.
    if( !gui.Grab() )
    {
        printf( "ERROR: input has no video\n" );
        return 1;
    }

    ////////
    // Set the application configuration parameters. Note that the input video
    // sequence is an 8-bit RGB image with dimensions given by gui.GetWidth()
    // and gui.GetHeight(). The parameters for the tensors are:
    //   tensor_dims                   - 3 dimensions of tensor [<width> x <height> x 3]
    //   tensor_input_fixed_point_pos  - fixed-point position for the input tensor
    //   tensor_output_fixed_point_pos - fixed-point position for the output tensor
    vx_uint32 width  = gui.GetWidth();
    vx_uint32 height = gui.GetHeight();
    vx_size   tensor_dims[3]                = { width, height, 3 }; // 3 channels (RGB)
    vx_uint8  tensor_input_fixed_point_pos  = 5; // Q10.5: input [-128..127] maps to [-4..3.96875]
    vx_uint8  tensor_output_fixed_point_pos = 7; // Q8.7:  output [-1..1] maps to [-128..128]

    ////////
    // Create the OpenVX context, make sure the returned context is valid, and
    // register log_callback to receive messages from the OpenVX framework.
    vx_context context = vxCreateContext();
    ERROR_CHECK_OBJECT( context );
    vxRegisterLogCallback( context, log_callback, vx_false_e );

    ////////
    // Register user kernels with the context.
    //
    // TODO STEP 05:********
    //   1. Register the user kernel with the context by calling your implementation
    //      of "registerUserKernel()".
    // ERROR_CHECK_STATUS( registerUserKernel( context ) );

    ////////
    // Create OpenVX tensor objects for input and output.
    //
    // TODO STEP 06:********
    //   1. Create tensor objects using tensor_dims, tensor_input_fixed_point_pos, and
    //      tensor_output_fixed_point_pos.
    // vx_tensor input_tensor  = vxCreateTensor( context, 3, tensor_dims, VX_TYPE_INT16, tensor_input_fixed_point_pos );
    // vx_tensor output_tensor = vxCreateTensor( context, /* Fill in parameters */ );
    // ERROR_CHECK_OBJECT( input_tensor );
    // ERROR_CHECK_OBJECT( output_tensor );

    ////////
    // Create, build, and verify the graph with the user kernel node.
    //
    // TODO STEP 07:********
    //   1. Build a graph with just one node created using userTensorCosNode().
    // vx_graph graph = vxCreateGraph( context );
    // ERROR_CHECK_OBJECT( graph );
    // vx_node cos_node = userTensorCosNode( graph, /* Fill in parameters */ );
    // ERROR_CHECK_OBJECT( cos_node );
    // ERROR_CHECK_STATUS( vxReleaseNode( &cos_node ) );
    // ERROR_CHECK_STATUS( vxVerifyGraph( graph ) );

    ////////
    // Process the video sequence frame by frame until the end of the sequence or abort.
    cv::Mat bgrMatForOutputDisplay( height, width, CV_8UC3 );
    for( int frame_index = 0; !gui.AbortRequested(); frame_index++ )
    {
        ////////
        // Copy the input RGB frame from OpenCV into input_tensor with UINT8 to Q10.5 (INT16)
        // conversion. In order to do this, you need to use the vxMapTensorPatch API
        // (see "vx_ext_amd.h").
        //
        // TODO STEP 08:********
        //   1. Use the vxMapTensorPatch API to access the input tensor object for writing.
        //   2. Copy UINT8 data from the OpenCV RGB image into the tensor object.
        //   3. Use the vxUnmapTensorPatch API to return control of the buffer back to the framework.
        vx_uint8 * cv_rgb_image_buffer = gui.GetBuffer();
        vx_size rgb_stride             = gui.GetStride();
        // vx_size zeros[3] = { 0 };
        // vx_size tensor_stride[3];
        // vx_map_id map_id;
        // vx_uint8 * buf;
        // ERROR_CHECK_STATUS( vxMapTensorPatch( input_tensor,
        //                                       3, /* Fill in parameters */
        //                                       &map_id, tensor_stride,
        //                                       (void **)&buf, VX_WRITE_ONLY, VX_MEMORY_TYPE_HOST, 0 ) );
        // for( vx_size c = 0; c < 3; c++ )
        // {
        //     for( vx_size y = 0; y < height; y++ )
        //     {
        //         const vx_uint8 * img = cv_rgb_image_buffer + y * rgb_stride + c;
        //         vx_int16 * inp = (vx_int16 *)(buf + y * tensor_stride[1] + c * tensor_stride[2]);
        //         for( vx_size x = 0; x < width; x++ )
        //         {
        //             // convert 0..255 to Q10.5 [-4..3.96875 range] fixed-point format
        //             inp[x] = (vx_int16)img[x * 3] - 128;
        //         }
        //     }
        // }
        // ERROR_CHECK_STATUS( vxUnmapTensorPatch( input_tensor, map_id ) );

        ////////
        // Now that the input tensor is ready, just run the graph.
        //
        // TODO STEP 09:********
        //   1. Call vxProcessGraph to execute the tensor_cos kernel in the graph.
        // ERROR_CHECK_STATUS( vxProcessGraph( graph ) );

        ////////
        // Display the output tensor object as an RGB image.
        //
        // TODO STEP 10:********
        //   1. Use the vxMapTensorPatch API to access the output tensor object for reading.
        //   2. Copy the tensor object data into the OpenCV RGB image.
        //   3. Use the vxUnmapTensorPatch API to return control of the buffer back to the framework.
        // ERROR_CHECK_STATUS( vxMapTensorPatch( output_tensor,
        //                                       3, zeros, tensor_dims,
        //                                       &map_id, tensor_stride,
        //                                       (void **)&buf, VX_READ_ONLY, VX_MEMORY_TYPE_HOST, 0 ) );
        // vx_uint8 * cv_bgr_image_buffer = bgrMatForOutputDisplay.data;
        // vx_size bgr_stride             = bgrMatForOutputDisplay.step;
        // for( vx_size c = 0; c < 3; c++ )
        // {
        //     for( vx_size y = 0; y < height; y++ )
        //     {
        //         const vx_int16 * out = (const vx_int16 *)(buf + y * tensor_stride[1] + c * tensor_stride[2]);
        //         vx_uint8 * img = cv_bgr_image_buffer + y * bgr_stride + (2 - c); // (2 - c) for RGB to BGR conversion
        //         for( vx_size x = 0; x < width; x++ )
        //         {
        //             // scale and convert Q8.7 [-1..1 range] fixed-point format to 0..255 with saturation
        //             vx_int16 value = out[x] + 128;
        //             value = value > 255 ? 255 : value; // saturation needed
        //             img[x * 3] = (vx_uint8)value;
        //         }
        //     }
        // }
        //#if ENABLE_DISPLAY
        //        cv::imshow( "Cosine", bgrMatForOutputDisplay );
        //#endif
        // ERROR_CHECK_STATUS( vxUnmapTensorPatch( output_tensor, map_id ) );

        ////////
        // Display the results and grab the next input RGB frame for the next iteration.
        char text[128];
        sprintf( text, "Keyboard ESC/Q-Quit SPACE-Pause [FRAME %d] [fixed_point_pos input:%d output:%d]",
                 frame_index, tensor_input_fixed_point_pos, tensor_output_fixed_point_pos );
        gui.DrawText( 0, 16, text );
        gui.Show();
        if( !gui.Grab() )
        {
            // Terminate the processing loop if the end of the sequence is detected.
            gui.WaitForKey();
            break;
        }
    }

    ////////********
    // To release an OpenVX object, you need to call the vxRelease<Object> API, which takes
    // a pointer to the object. If the release operation is successful, the OpenVX framework
    // will reset the object to NULL.
    //
    // TODO STEP 11:****
    //   1. Release the graph and tensor objects.
    // ERROR_CHECK_STATUS( vxReleaseGraph( &graph ) );
    // ERROR_CHECK_STATUS( vxReleaseTensor( &input_tensor ) );
    // ERROR_CHECK_STATUS( vxReleaseTensor( &output_tensor ) );

    ERROR_CHECK_STATUS( vxReleaseContext( &context ) );

    return 0;
}
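// The graph step above relies on userTensorCosNode(), which this exercise
// implements in a separate user-kernel source file. Below is a minimal sketch
// of what such a node factory might look like; USER_KERNEL_TENSOR_COS is an
// assumed name for whatever kernel enum your registerUserKernel()
// implementation registered, and ERROR_CHECK_* are the tutorial's macros.
vx_node userTensorCosNode( vx_graph graph, vx_tensor input, vx_tensor output )
{
    vx_context context = vxGetContext( ( vx_reference ) graph );
    // Look up the user kernel registered earlier by registerUserKernel().
    vx_kernel kernel = vxGetKernelByEnum( context, USER_KERNEL_TENSOR_COS );
    ERROR_CHECK_OBJECT( kernel );
    // Instantiate the kernel as a node and bind its two tensor parameters by index.
    vx_node node = vxCreateGenericNode( graph, kernel );
    ERROR_CHECK_OBJECT( node );
    ERROR_CHECK_STATUS( vxSetParameterByIndex( node, 0, ( vx_reference ) input ) );
    ERROR_CHECK_STATUS( vxSetParameterByIndex( node, 1, ( vx_reference ) output ) );
    // The node holds its own reference to the kernel, so the local one can go.
    ERROR_CHECK_STATUS( vxReleaseKernel( &kernel ) );
    return node;
}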
int main(int argc, char* argv[])
{
    try
    {
        nvxio::Application &app = nvxio::Application::get();

        //
        // Parse command line arguments
        //

        // std::string sourceUri = app.findSampleFilePath("file:///dev/video0");
        // std::string sourceUri = "/home/ubuntu/VisionWorks-SFM-0.82-Samples/data/sfm/parking_sfm.mp4";
        std::string sourceUri = "/home/px4/test.mp4";
        std::string configFile = app.findSampleFilePath("sfm/sfm_config.ini");
        bool fullPipeline = false;
        std::string maskFile;
        bool noLoop = false;

        app.setDescription("This sample demonstrates the Structure from Motion (SfM) algorithm");
        app.addOption(0, "mask", "Optional mask", nvxio::OptionHandler::string(&maskFile));
        app.addBooleanOption('f', "fullPipeline", "Run the full SfM pipeline without using IMU data", &fullPipeline);
        app.addBooleanOption('n', "noLoop", "Run the sample without looping", &noLoop);
        app.init(argc, argv);

        nvx_module_version_t sfmVersion;
        nvxSfmGetVersion(&sfmVersion);
        std::cout << "VisionWorks SFM version: " << sfmVersion.major << "." << sfmVersion.minor
                  << "." << sfmVersion.patch << sfmVersion.suffix << std::endl;

        std::string imuDataFile;
        std::string frameDataFile;
        if (!fullPipeline)
        {
            imuDataFile = app.findSampleFilePath("sfm/imu_data.txt");
            frameDataFile = app.findSampleFilePath("sfm/images_timestamps.txt");
        }

        if (app.getPreferredRenderName() != "default")
        {
            std::cerr << "The sample uses a custom Render for its GUI. The --nvxio_render option is not supported!" << std::endl;
            return nvxio::Application::APP_EXIT_CODE_NO_RENDER;
        }

        //
        // Read SfMParams
        //

        nvx::SfM::SfMParams params;
        std::string msg;
        if (!read(configFile, params, msg))
        {
            std::cout << msg << std::endl;
            return nvxio::Application::APP_EXIT_CODE_INVALID_VALUE;
        }

        //
        // Create OpenVX context
        //

        nvxio::ContextGuard context;

        //
        // Messages generated by the OpenVX framework will be processed by nvxio::stdoutLogCallback
        //

        vxRegisterLogCallback(context, &nvxio::stdoutLogCallback, vx_false_e);

        //
        // Add SfM kernels
        //

        NVXIO_SAFE_CALL(nvxSfmRegisterKernels(context));

        //
        // Create a Frame Source
        //

        std::unique_ptr<nvxio::FrameSource> source(
            nvxio::createDefaultFrameSource(context, sourceUri));

        if (!source || !source->open())
        {
            std::cout << "Can't open source file: " << sourceUri << std::endl;
            return nvxio::Application::APP_EXIT_CODE_NO_RESOURCE;
        }

        nvxio::FrameSource::Parameters sourceParams = source->getConfiguration();

        //
        // Create an OpenVX Image to hold frames from the video source
        //

        vx_image frame = vxCreateImage(context,
                                       sourceParams.frameWidth, sourceParams.frameHeight, sourceParams.format);
        NVXIO_CHECK_REFERENCE(frame);

        //
        // Load the mask image if needed
        //

        vx_image mask = NULL;
        if (!maskFile.empty())
        {
            mask = nvxio::loadImageFromFile(context, maskFile, VX_DF_IMAGE_U8);

            vx_uint32 mask_width = 0, mask_height = 0;
            vxQueryImage(mask, VX_IMAGE_ATTRIBUTE_WIDTH, &mask_width, sizeof(mask_width));
            vxQueryImage(mask, VX_IMAGE_ATTRIBUTE_HEIGHT, &mask_height, sizeof(mask_height));

            if (mask_width != sourceParams.frameWidth || mask_height != sourceParams.frameHeight)
            {
                std::cerr << "The mask must have the same size as the input source." << std::endl;
                return nvxio::Application::APP_EXIT_CODE_INVALID_DIMENSIONS;
            }
        }

        //
        // Create a 3D Render instance
        //

        std::unique_ptr<nvxio::Render3D> render3D(nvxio::createDefaultRender3D(context, 0, 0,
            "SfM Point Cloud", sourceParams.frameWidth, sourceParams.frameHeight));

        nvxio::Render::TextBoxStyle style = {{255, 255, 255, 255}, {0, 0, 0, 255}, {10, 10}};

        if (!render3D)
        {
            std::cerr << "Can't create a renderer" << std::endl;
            return nvxio::Application::APP_EXIT_CODE_NO_RENDER;
        }

        float fovYinRad = 2.f * atanf(sourceParams.frameHeight / 2.f / params.pFy);
        render3D->setDefaultFOV(180.f / nvxio::PI_F * fovYinRad);

        EventData eventData;
        render3D->setOnKeyboardEventCallback(eventCallback, &eventData);

        //
        // Create SfM class instance
        //

        std::unique_ptr<nvx::SfM> sfm(nvx::SfM::createSfM(context, params));

        //
        // Create FenceDetectorWithKF class instance
        //

        FenceDetectorWithKF fenceDetector;

        nvxio::FrameSource::FrameStatus frameStatus;
        do
        {
            frameStatus = source->fetch(frame);
        } while (frameStatus == nvxio::FrameSource::TIMEOUT);

        if (frameStatus == nvxio::FrameSource::CLOSED)
        {
            std::cerr << "Source has no frames" << std::endl;
            return nvxio::Application::APP_EXIT_CODE_NO_FRAMESOURCE;
        }

        vx_status status = sfm->init(frame, mask, imuDataFile, frameDataFile);
        if (status != VX_SUCCESS)
        {
            std::cerr << "Failed to initialize the algorithm" << std::endl;
            return nvxio::Application::APP_EXIT_CODE_ERROR;
        }

        const vx_size maxNumOfPoints = 2000;
        const vx_size maxNumOfPlanesVertices = 2000;
        vx_array filteredPoints = vxCreateArray(context, NVX_TYPE_POINT3F, maxNumOfPoints);
        vx_array planesVertices = vxCreateArray(context, NVX_TYPE_POINT3F, maxNumOfPlanesVertices);

        //
        // Run processing loop
        //

        vx_matrix model = vxCreateMatrix(context, VX_TYPE_FLOAT32, 4, 4);
        float eye_data[4*4] = {1, 0, 0, 0,
                               0, 1, 0, 0,
                               0, 0, 1, 0,
                               0, 0, 0, 1};
        vxWriteMatrix(model, eye_data);

        nvxio::Render3D::PointCloudStyle pcStyle = {0, 12};
        nvxio::Render3D::PlaneStyle fStyle = {0, 10};

        GroundPlaneSmoother groundPlaneSmoother(7);

        nvx::Timer totalTimer;
        totalTimer.tic();
        double proc_ms = 0;
        float yGroundPlane = 0;
        while (!eventData.shouldStop)
        {
            if (!eventData.pause)
            {
                frameStatus = source->fetch(frame);

                if (frameStatus == nvxio::FrameSource::TIMEOUT)
                {
                    continue;
                }
                if (frameStatus == nvxio::FrameSource::CLOSED)
                {
                    if (noLoop) break;

                    if (!source->open())
                    {
                        std::cerr << "Failed to reopen the source" << std::endl;
                        break;
                    }

                    do
                    {
                        frameStatus = source->fetch(frame);
                    } while (frameStatus == nvxio::FrameSource::TIMEOUT);

                    sfm->init(frame, mask, imuDataFile, frameDataFile);
                    fenceDetector.reset();
                    continue;
                }

                // Process
                nvx::Timer procTimer;
                procTimer.tic();
                sfm->track(frame, mask);
                proc_ms = procTimer.toc();
            }

            // Print performance results
            sfm->printPerfs();

            if (!eventData.showPointCloud)
            {
                render3D->disableDefaultKeyboardEventCallback();
                render3D->putImage(frame);
            }
            else
            {
                render3D->enableDefaultKeyboardEventCallback();
            }

            filterPoints(sfm->getPointCloud(), filteredPoints);
            render3D->putPointCloud(filteredPoints, model, pcStyle);

            if (eventData.showFences)
            {
                fenceDetector.getFencePlaneVertices(filteredPoints, planesVertices);
                render3D->putPlanes(planesVertices, model, fStyle);
            }

            if (fullPipeline && eventData.showGP)
            {
                const float x1(-1.5), x2(1.5), z1(1), z2(4);
                vx_matrix gp = sfm->getGroundPlane();
                yGroundPlane = groundPlaneSmoother.getSmoothedY(gp, x1, z1);

                nvx_point3f_t pt[4] = {{x1, yGroundPlane, z1},
                                       {x1, yGroundPlane, z2},
                                       {x2, yGroundPlane, z2},
                                       {x2, yGroundPlane, z1}};

                vx_array gpPoints = vxCreateArray(context, NVX_TYPE_POINT3F, 4);
                vxAddArrayItems(gpPoints, 4, pt, sizeof(pt[0]));

                render3D->putPlanes(gpPoints, model, fStyle);
                vxReleaseArray(&gpPoints);
            }

            double total_ms = totalTimer.toc();

            // Add a delay to limit the frame rate
            app.sleepToLimitFPS(total_ms);

            total_ms = totalTimer.toc();
            totalTimer.tic();

            std::string state = createInfo(fullPipeline, proc_ms, total_ms, eventData);
            render3D->putText(state.c_str(), style);

            if (!render3D->flush())
            {
                eventData.shouldStop = true;
            }
        }

        //
        // Release all objects
        //

        vxReleaseImage(&frame);
        vxReleaseImage(&mask);
        vxReleaseMatrix(&model);
        vxReleaseArray(&filteredPoints);
        vxReleaseArray(&planesVertices);
    }
    catch (const std::exception& e)
    {
        std::cerr << "Error: " << e.what() << std::endl;
        return nvxio::Application::APP_EXIT_CODE_ERROR;
    }

    return nvxio::Application::APP_EXIT_CODE_SUCCESS;
}
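// EventData and eventCallback() are defined elsewhere in this sample; the
// main() above only uses their fields. A minimal sketch, assuming the usual
// VisionWorks sample callback signature (void* context, vx_char key,
// vx_uint32 x, vx_uint32 y); the key bindings below are illustrative, not
// necessarily the sample's actual ones.
struct EventData
{
    EventData() : shouldStop(false), pause(false),
                  showPointCloud(true), showFences(true), showGP(true) {}

    bool shouldStop;
    bool pause;
    bool showPointCloud;
    bool showFences;
    bool showGP;
};

static void eventCallback(void* context, vx_char key, vx_uint32 /*x*/, vx_uint32 /*y*/)
{
    EventData* data = static_cast<EventData*>(context);

    if (key == 27)       data->shouldStop = true;                       // ESC quits
    else if (key == 32)  data->pause = !data->pause;                    // SPACE pauses
    else if (key == 'p') data->showPointCloud = !data->showPointCloud;
    else if (key == 'f') data->showFences = !data->showFences;
    else if (key == 'g') data->showGP = !data->showGP;
}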
////////
// main() has all the OpenVX application code for this exercise.
// Command-line usage:
//   % exercise2 [<video-sequence>|<camera-device-number>]
// When neither a video sequence nor a camera device number is specified,
// it defaults to the video sequence in "PETS09-S1-L1-View001.avi".
int main( int argc, char * argv[] )
{
    // Get the default video sequence when nothing is specified on the command-line and
    // instantiate the OpenCV GUI module for reading input RGB images and displaying
    // the image with OpenVX results.
    const char * video_sequence = argv[1];
    CGuiModule gui( video_sequence );

    // Try to grab the first video frame from the sequence using cv::VideoCapture
    // and check if a video frame is available.
    if( !gui.Grab() )
    {
        printf( "ERROR: input has no video\n" );
        return 1;
    }

    ////////
    // Set the application configuration parameters. Note that the input video
    // sequence is an 8-bit RGB image with dimensions given by gui.GetWidth()
    // and gui.GetHeight(). The parameters for the Harris corners algorithm are:
    //   max_keypoint_count      - maximum number of keypoints to track
    //   harris_strength_thresh  - minimum threshold score to keep a corner
    //                             (computed using the normalized Sobel kernel)
    //   harris_min_distance     - radial L2 distance for non-max suppression
    //   harris_k_sensitivity    - sensitivity threshold k from the Harris-Stephens equation
    //   harris_gradient_size    - window size for gradient computation
    //   harris_block_size       - block window size used to compute the Harris corner score
    //   lk_pyramid_levels       - number of pyramid levels for LK optical flow
    //   lk_termination          - can be VX_TERM_CRITERIA_ITERATIONS or
    //                             VX_TERM_CRITERIA_EPSILON or VX_TERM_CRITERIA_BOTH
    //   lk_epsilon              - error for terminating the algorithm
    //   lk_num_iterations       - number of iterations
    //   lk_use_initial_estimate - turn on/off use of initial estimates
    //   lk_window_dimension     - size of window on which to perform the algorithm
    vx_uint32  width                   = gui.GetWidth();
    vx_uint32  height                  = gui.GetHeight();
    vx_size    max_keypoint_count      = 10000;
    vx_float32 harris_strength_thresh  = 0.0005f;
    vx_float32 harris_min_distance     = 5.0f;
    vx_float32 harris_k_sensitivity    = 0.04f;
    vx_int32   harris_gradient_size    = 3;
    vx_int32   harris_block_size       = 3;
    vx_uint32  lk_pyramid_levels       = 6;
    vx_float32 lk_pyramid_scale        = VX_SCALE_PYRAMID_HALF;
    vx_enum    lk_termination          = VX_TERM_CRITERIA_BOTH;
    vx_float32 lk_epsilon              = 0.01f;
    vx_uint32  lk_num_iterations       = 5;
    vx_bool    lk_use_initial_estimate = vx_false_e;
    vx_uint32  lk_window_dimension     = 6;

    ////////
    // Create the OpenVX context, make sure the returned context is valid, and
    // register log_callback to receive messages from the OpenVX framework.
    vx_context context = vxCreateContext();
    ERROR_CHECK_OBJECT( context );
    vxRegisterLogCallback( context, log_callback, vx_false_e );

    ////////
    // Create an OpenVX image object for the input RGB image.
    vx_image input_rgb_image = vxCreateImage( context, width, height, VX_DF_IMAGE_RGB );
    ERROR_CHECK_OBJECT( input_rgb_image );

    ////////********
    // The OpenVX optical flow functionality requires pyramids of the current input
    // image and the previous image. It also requires keypoints that correspond
    // to the previous pyramid, and will output updated keypoints into
    // another keypoint array. To be able to toggle between the current and
    // the previous buffers, you need to use OpenVX delay objects and vxAgeDelay().
    // Create OpenVX pyramid and array object exemplars and create OpenVX delay
    // objects for both to hold two of each. Note that the exemplar objects are not
    // needed once the delay objects are created.
    //
    // TODO STEP 01:********
    //   1. Use the vxCreatePyramid API to create a pyramid exemplar with the
    //      same dimensions as the input image, VX_DF_IMAGE_U8 as the image format,
    //      lk_pyramid_levels as the levels, and lk_pyramid_scale as the scale.
    //      We gave code for this in comments.
    //   2. Use the vxCreateArray API to create an array exemplar with the
    //      keypoint data type and max_keypoint_count as the capacity.
    //      You need to add the missing parameters to the code in comments.
    //   3. Use the vxCreateDelay API to create delay objects for the pyramid and
    //      keypoint array using the exemplars created in the two steps above.
    //      Use 2 delay slots for both of the delay objects.
    //      We gave code for one in comments; do similar for the other.
    //   4. Release the pyramid and keypoint array exemplar objects.
    //      We gave code for one in comments; do similar for the other.
    //   5. Use the ERROR_CHECK_OBJECT/STATUS macros for proper error checking.
    //      We gave a few error checks; do similar for the others.
    // vx_pyramid pyramidExemplar = vxCreatePyramid( context, lk_pyramid_levels,
    //                                               lk_pyramid_scale, width, height, VX_DF_IMAGE_U8 );
    // ERROR_CHECK_OBJECT( pyramidExemplar );
    // vx_delay pyramidDelay = vxCreateDelay( context, ( vx_reference )pyramidExemplar, 2 );
    // ERROR_CHECK_OBJECT( pyramidDelay );
    // ERROR_CHECK_STATUS( vxReleasePyramid( &pyramidExemplar ) );
    // vx_array keypointsExemplar = vxCreateArray( /* Fill in parameters */ );
    // vx_delay keypointsDelay = vxCreateDelay( /* Fill in parameters */ );

    ////////********
    // An object from a delay slot can be accessed using the vxGetReferenceFromDelay API.
    // You need to use index = 0 for the current object and index = -1 for the previous object.
    //
    // TODO STEP 02:********
    //   1. Use the vxGetReferenceFromDelay API to get the current and previous
    //      pyramid objects from the pyramid delay object. Note that you need
    //      to typecast the vx_reference object to vx_pyramid.
    //      We gave code for one in comments; do similar for the other.
    //   2. Similarly, get the current and previous keypoint array objects from
    //      the keypoint delay object.
    //      We gave code for one in comments; do similar for the other.
    //   3. Use ERROR_CHECK_OBJECT for proper error checking.
    //      We gave one error check; do similar for the others.
    // vx_pyramid currentPyramid  = ( vx_pyramid ) vxGetReferenceFromDelay( pyramidDelay, 0 );
    // vx_pyramid previousPyramid = ( vx_pyramid ) vxGetReferenceFromDelay( /* Fill in parameters */ );
    // vx_array currentKeypoints  = ( vx_array ) vxGetReferenceFromDelay( /* Fill in parameters */ );
    // vx_array previousKeypoints = ( vx_array ) vxGetReferenceFromDelay( keypointsDelay, -1 );
    // ERROR_CHECK_OBJECT( currentPyramid );

    ////////********
    // The Harris and optical flow algorithms require their own graph objects.
    // The Harris graph needs to extract a gray scale image out of the input RGB image,
    // compute an initial set of keypoints, and compute an initial pyramid for use
    // by the optical flow graph.
    //
    // TODO STEP 03:********
    //   1. Create two graph objects: one for the Harris corner detector and
    //      the other for feature tracking using optical flow, using the
    //      vxCreateGraph API.
    //      We gave code for one graph; do similar for the other.
    //   2. Use ERROR_CHECK_OBJECT to check the objects.
    //      We gave one error check; do similar for the other.
    // vx_graph graphHarris = vxCreateGraph( context );
    // vx_graph graphTrack  = /* Fill in here */;
    // ERROR_CHECK_OBJECT( graphHarris );

    ////////********
    // Harris and pyramid computation expect the input to be an 8-bit image.
    // Given that the input is an RGB image, it is best to extract a gray image
    // from the RGB image, which requires two steps:
    //   - perform RGB to IYUV color conversion
    //   - extract the Y channel from the IYUV image
    // This requires two intermediate OpenVX image objects. Since you don't
    // need to access these objects from the application, they can be virtual
    // objects that can be created using the vxCreateVirtualImage API.
    //
    // TODO STEP 04:********
    //   1. Create an IYUV image and a U8 image (for the Y channel) with the same
    //      dimensions as the input RGB image. Note that the image formats for
    //      the IYUV and U8 images are VX_DF_IMAGE_IYUV and VX_DF_IMAGE_U8.
    //      Note that virtual objects are specific to a graph, so you
    //      need to create two sets, one for each graph.
    //      We gave one fully in comments and you need to fill in the missing
    //      parameters for the others.
    //   2. Use ERROR_CHECK_OBJECT to check the objects.
    //      We gave one error check in comments; do similar for the others.
    // vx_image harris_yuv_image       = vxCreateVirtualImage( graphHarris, width, height, VX_DF_IMAGE_IYUV );
    // vx_image harris_luma_image      = vxCreateVirtualImage( graphHarris, /* Fill in parameters */ );
    // vx_image opticalflow_yuv_image  = vxCreateVirtualImage( graphTrack, /* Fill in parameters */ );
    // vx_image opticalflow_luma_image = vxCreateVirtualImage( /* Fill in parameters */ );
    // ERROR_CHECK_OBJECT( harris_yuv_image );

    ////////********
    // The Harris corner detector and optical flow nodes (see "VX/vx_nodes.h")
    // take the strength_thresh, min_distance, sensitivity, epsilon,
    // num_iterations, and use_initial_estimate parameters as scalar
    // data objects. So, you need to create scalar objects with the corresponding
    // configuration parameters.
    //
    // TODO STEP 05:********
    //   1. Create scalar data objects of VX_TYPE_FLOAT32 for strength_thresh,
    //      min_distance, sensitivity, and epsilon. Set their
    //      initial values to harris_strength_thresh, harris_min_distance,
    //      harris_k_sensitivity, and lk_epsilon.
    //      We gave full code for one scalar in comments; fill in the
    //      missing arguments for the other ones.
    //   2. Similarly, create scalar objects for num_iterations and
    //      use_initial_estimate with the initial values lk_num_iterations and
    //      lk_use_initial_estimate. Make sure to use the proper data types for
    //      these parameters.
    //      We gave full code for one scalar in comments; fill in the
    //      missing arguments for the other.
    //   3. Use ERROR_CHECK_OBJECT to check proper creation of the objects.
    //      We gave the error check for one scalar; do similar for the other 5 scalars.
    // vx_scalar strength_thresh      = vxCreateScalar( context, VX_TYPE_FLOAT32, &harris_strength_thresh );
    // vx_scalar min_distance         = vxCreateScalar( context, /* Fill in parameters */ );
    // vx_scalar sensitivity          = vxCreateScalar( /* Fill in parameters */ );
    // vx_scalar epsilon              = vxCreateScalar( /* Fill in parameters */ );
    // vx_scalar num_iterations       = vxCreateScalar( context, VX_TYPE_UINT32, /* Fill in parameter */ );
    // vx_scalar use_initial_estimate = vxCreateScalar( context, VX_TYPE_BOOL, &lk_use_initial_estimate );
    // ERROR_CHECK_OBJECT( strength_thresh );

    ////////********
    // Now all the objects have been created for building the graphs.
    // First, build a graph that performs Harris corner detection and initial pyramid computation.
    // See "VX/vx_nodes.h" for the APIs that add nodes into a graph.
    //
    // TODO STEP 06:********
    //   1. Use the vxColorConvertNode and vxChannelExtractNode APIs to get a gray
    //      scale image for Harris and pyramid computation from the input
    //      RGB image. Add these nodes into the Harris graph.
    //      We gave code in comments with a missing parameter for you to fill in.
    //   2. Use the vxGaussianPyramidNode API to add a pyramid computation node.
    //      You need to use the current pyramid from the pyramid delay object.
    //      We gave code in comments with a missing parameter for you to fill in.
    //   3. Use the vxHarrisCornersNode API to add a Harris corners node.
    //      You need to use the current keypoints from the keypoints delay object.
    //      We gave code in comments with a few missing parameters for you to fill in.
    //   4. Use ERROR_CHECK_OBJECT to check proper creation of the objects.
    //   5. Release the node and virtual objects immediately, since the graph
    //      retains references to them.
    //   6. Call vxVerifyGraph to check for any errors in the graph.
    //      Fill in the missing parameter in the commented code.
    // vx_node nodesHarris[] =
    // {
    //     vxColorConvertNode( graphHarris, input_rgb_image, harris_yuv_image ),
    //     vxChannelExtractNode( graphHarris, /* Fill in parameter */, VX_CHANNEL_Y, harris_luma_image ),
    //     vxGaussianPyramidNode( graphHarris, /* Fill in parameter */, currentPyramid ),
    //     vxHarrisCornersNode( graphHarris, /* Fill in missing parameters */, currentKeypoints, NULL )
    // };
    // for( vx_size i = 0; i < sizeof( nodesHarris ) / sizeof( nodesHarris[0] ); i++ )
    // {
    //     ERROR_CHECK_OBJECT( nodesHarris[i] );
    //     ERROR_CHECK_STATUS( vxReleaseNode( &nodesHarris[i] ) );
    // }
    // ERROR_CHECK_STATUS( vxReleaseImage( &harris_yuv_image ) );
    // ERROR_CHECK_STATUS( vxReleaseImage( &harris_luma_image ) );
    // ERROR_CHECK_STATUS( vxVerifyGraph( /* Fill in parameter */ ) );

    ////////********
    // Now, build a graph that performs pyramid computation and feature
    // tracking using optical flow.
    //
    // TODO STEP 07:********
    //   1. Use the vxColorConvertNode and vxChannelExtractNode APIs to get a gray
    //      scale image for pyramid computation from the input RGB image.
    //      Add these nodes into the tracking graph.
    //      We gave the code in comments for the color convert node; do a similar
    //      one for the channel extract node.
    //   2. Use the vxGaussianPyramidNode API to add a pyramid computation node.
    //      You need to use the current pyramid from the pyramid delay object.
    //      Most of the code is given in the comments; fill in the missing parameter.
    //   3. Use the vxOpticalFlowPyrLKNode API to add an optical flow node. You need to
    //      use the current and previous keypoints from the keypoints delay object.
    //      Fill in the missing parameters in the commented code.
    //   4. Use ERROR_CHECK_OBJECT to check proper creation of the objects.
    //   5. Release the node and virtual objects immediately, since the graph
    //      retains references to them.
    //   6. Call vxVerifyGraph to check for any errors in the graph.
    //      Fill in the missing parameter in the commented code.
    // vx_node nodesTrack[] =
    // {
    //     vxColorConvertNode( graphTrack, input_rgb_image, opticalflow_yuv_image ),
    //     vxChannelExtractNode( graphTrack, /* Fill in parameters */ ),
    //     vxGaussianPyramidNode( graphTrack, /* Fill in parameter */, currentPyramid ),
    //     vxOpticalFlowPyrLKNode( graphTrack, /* Fill in parameters */ )
    // };
    // for( vx_size i = 0; i < sizeof( nodesTrack ) / sizeof( nodesTrack[0] ); i++ )
    // {
    //     ERROR_CHECK_OBJECT( nodesTrack[i] );
    //     ERROR_CHECK_STATUS( vxReleaseNode( &nodesTrack[i] ) );
    // }
    // ERROR_CHECK_STATUS( vxReleaseImage( &opticalflow_yuv_image ) );
    // ERROR_CHECK_STATUS( vxReleaseImage( &opticalflow_luma_image ) );
    // ERROR_CHECK_STATUS( vxVerifyGraph( /* Fill in parameter */ ) );

    ////////
    // Process the video sequence frame by frame until the end of the sequence or abort.
    for( int frame_index = 0; !gui.AbortRequested(); frame_index++ )
    {
        ////////
        // Copy the input RGB frame from OpenCV to OpenVX.
        // In order to do this, you need to use the vxAccessImagePatch and vxCommitImagePatch APIs.
        // See "VX/vx_api.h" for the description of these APIs.
        vx_rectangle_t cv_rgb_image_region;
        cv_rgb_image_region.start_x = 0;
        cv_rgb_image_region.start_y = 0;
        cv_rgb_image_region.end_x   = width;
        cv_rgb_image_region.end_y   = height;
        vx_imagepatch_addressing_t cv_rgb_image_layout;
        cv_rgb_image_layout.stride_x = 3;
        cv_rgb_image_layout.stride_y = gui.GetStride();
        vx_uint8 * cv_rgb_image_buffer = gui.GetBuffer();
        ERROR_CHECK_STATUS( vxAccessImagePatch( input_rgb_image, &cv_rgb_image_region, 0,
                                                &cv_rgb_image_layout, ( void ** )&cv_rgb_image_buffer, VX_WRITE_ONLY ) );
        ERROR_CHECK_STATUS( vxCommitImagePatch( input_rgb_image, &cv_rgb_image_region, 0,
                                                &cv_rgb_image_layout, cv_rgb_image_buffer ) );

        ////////********
        // Now that the input RGB image is ready, just run a graph.
        // Run Harris at the beginning to initialize the previous keypoints.
        //
        // TODO STEP 08:********
        //   1. Run a graph using the vxProcessGraph API. Select the Harris graph
        //      if frame_index == 0 (i.e., the first frame of the video
        //      sequence); otherwise, select the feature tracking graph.
        //   2. Use ERROR_CHECK_STATUS for error checking.

        ////////********
        // To mark the keypoints in the display, you need to access the output
        // keypoint array and draw each item on the output window using gui.DrawArrow().
        //
        // TODO STEP 09:********
        //   1. Use the vxGetReferenceFromDelay API to get the current and previous
        //      keypoint array objects from the keypoints delay object.
        //      Make sure to typecast the vx_reference object to vx_array.
        //      We gave one for the previous keypoint array in comments;
        //      do a similar one for the current keypoint array.
        //   2. The OpenVX array object has an attribute that keeps the current
        //      number of items in the array. The name of the attribute is
        //      VX_ARRAY_ATTRIBUTE_NUMITEMS and its value is of type vx_size.
        //      Use the vxQueryArray API to get the number of keypoints in the
        //      current keypoint array data object, representing the number of
        //      corners detected in the input RGB image.
        //      IMPORTANT: Read the number of items into "num_corners",
        //      because this variable is displayed by the code segment below.
        //      We gave most of this statement in a comment; just fill in the
        //      missing parameter.
        //   3. The data items in the output keypoint array are of type
        //      vx_keypoint_t (see "VX/vx_types.h"). To access the array
        //      buffer, use vxAccessArrayRange with start index = 0,
        //      end index = number of items in the array, and usage mode =
        //      VX_READ_ONLY. Note that the stride returned by this access
        //      call is not guaranteed to be sizeof(vx_keypoint_t).
        //      Also make sure that num_corners is > 0, because
        //      vxAccessArrayRange expects end index > 0.
        //      We gave the code for the previous keypoint array in a comment;
        //      do a similar one for the current keypoint array.
        //   4. For each item in the keypoint buffer, use vxArrayItem to
        //      access an individual keypoint and draw a marker at (x,y)
        //      using gui.DrawArrow() if the tracking_status field of the keypoint
        //      is non-zero. Also count the number of keypoints with
        //      non-zero tracking_status into the "num_tracking" variable.
        //      We gave most of the code; fill in the missing parameters and uncomment.
        //   5. Hand control of the output keypoint buffer back to the
        //      OpenVX framework by calling the vxCommitArrayRange API.
        //      We gave the code for the previous keypoint array in a comment;
        //      do a similar one for the current keypoint array.
        //   6. Use ERROR_CHECK_STATUS for error checking.
        vx_size num_corners = 0, num_tracking = 0;
        // previousKeypoints = ( vx_array )vxGetReferenceFromDelay( keypointsDelay, -1 );
        // currentKeypoints  = ( vx_array )vxGetReferenceFromDelay( /* Fill in parameters */ );
        // ERROR_CHECK_OBJECT( currentKeypoints );
        // ERROR_CHECK_OBJECT( previousKeypoints );
        // ERROR_CHECK_STATUS( vxQueryArray( previousKeypoints, /* Fill in parameter */, &num_corners, sizeof( num_corners ) ) );
        if( num_corners > 0 )
        {
            vx_size kp_old_stride, kp_new_stride;
            vx_keypoint_t * kp_old_buf = NULL, * kp_new_buf = NULL;
            // ERROR_CHECK_STATUS( vxAccessArrayRange( previousKeypoints, 0, num_corners,
            //                                         &kp_old_stride, ( void ** ) &kp_old_buf, VX_READ_ONLY ) );
            // ERROR_CHECK_STATUS( vxAccessArrayRange( /* Fill in parameters */ ) );
            for( vx_size i = 0; i < num_corners; i++ )
            {
                // vx_keypoint_t * kp_old = &vxArrayItem( vx_keypoint_t, kp_old_buf, i, kp_old_stride );
                // vx_keypoint_t * kp_new = &vxArrayItem( /* Fill in parameters */ );
                // if( kp_new->tracking_status )
                // {
                //     num_tracking++;
                //     gui.DrawArrow( kp_old->x, kp_old->y, kp_new->x, kp_new->y );
                // }
            }
            // ERROR_CHECK_STATUS( vxCommitArrayRange( previousKeypoints, 0, num_corners, kp_old_buf ) );
            // ERROR_CHECK_STATUS( vxCommitArrayRange( /* Fill in parameters */ ) );
        }

        ////////********
        // Flip the current and previous pyramids and keypoints in the delay objects.
        //
        // TODO STEP 10:********
        //   1. Use the vxAgeDelay API to flip the current and previous buffers in the delay objects.
        //      You need to call vxAgeDelay for both delay objects.
        //   2. Use ERROR_CHECK_STATUS for error checking.
        // ERROR_CHECK_STATUS( vxAgeDelay( /* Fill in parameter */ ) );
        // ERROR_CHECK_STATUS( vxAgeDelay( /* Fill in parameter */ ) );

        ////////
        // Display the results and grab the next input RGB frame for the next iteration.
        char text[128];
        sprintf( text, "Keyboard ESC/Q-Quit SPACE-Pause [FRAME %d]", frame_index );
        gui.DrawText( 0, 16, text );
        sprintf( text, "Number of Corners: %d [tracking %d]", ( int )num_corners, ( int )num_tracking );
        gui.DrawText( 0, 36, text );
        gui.Show();
        if( !gui.Grab() )
        {
            // Terminate the processing loop if the end of the sequence is detected.
            gui.WaitForKey();
            break;
        }
    }

    ////////********
    // Query graph performance using VX_GRAPH_ATTRIBUTE_PERFORMANCE and print timing
    // in milliseconds. Note that the time units of the vx_perf_t fields are nanoseconds.
    //
    // TODO STEP 11:********
    //   1. Use the vxQueryGraph API with VX_GRAPH_ATTRIBUTE_PERFORMANCE to query graph performance.
    //      We gave the attribute query for one graph in comments. Do the same for the second graph.
    //   2. Print the average and min execution times in milliseconds. Use the printf in comments.
    // vx_perf_t perfHarris = { 0 }, perfTrack = { 0 };
    // ERROR_CHECK_STATUS( vxQueryGraph( graphHarris, VX_GRAPH_ATTRIBUTE_PERFORMANCE, &perfHarris, sizeof( perfHarris ) ) );
    // ERROR_CHECK_STATUS( vxQueryGraph( /* Fill in parameters to get the performance of the other graph */ ) );
    // printf( "GraphName NumFrames Avg(ms) Min(ms)\n"
    //         "Harris    %9d %7.3f %7.3f\n"
    //         "Track     %9d %7.3f %7.3f\n",
    //         ( int )perfHarris.num, ( float )perfHarris.avg * 1e-6f, ( float )perfHarris.min * 1e-6f,
    //         ( int )perfTrack.num,  ( float )perfTrack.avg  * 1e-6f, ( float )perfTrack.min  * 1e-6f );

    ////////********
    // Release all the OpenVX objects created in this exercise, releasing the context last.
    // To release an OpenVX object, you need to call the vxRelease<Object> API, which takes
    // a pointer to the object. If the release operation is successful, the OpenVX framework
    // will reset the object to NULL.
    //
    // TODO STEP 12:********
    //   1. Release all other objects using the vxRelease<Object> APIs.
    //      You have to release 2 graph objects, 1 image object, 2 delay objects,
    //      6 scalar objects, and 1 context object.
    //   2. Use ERROR_CHECK_STATUS for error checking.
    // ERROR_CHECK_STATUS( vxReleaseContext( &context ) );

    return 0;
}
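// A minimal sketch of the release sequence that STEP 12 asks for, assuming the
// earlier TODO steps were completed using the object names from their commented
// code (2 graphs, 1 image, 2 delays, 6 scalars, then the context last):
// ERROR_CHECK_STATUS( vxReleaseGraph( &graphHarris ) );
// ERROR_CHECK_STATUS( vxReleaseGraph( &graphTrack ) );
// ERROR_CHECK_STATUS( vxReleaseImage( &input_rgb_image ) );
// ERROR_CHECK_STATUS( vxReleaseDelay( &pyramidDelay ) );
// ERROR_CHECK_STATUS( vxReleaseDelay( &keypointsDelay ) );
// ERROR_CHECK_STATUS( vxReleaseScalar( &strength_thresh ) );
// ERROR_CHECK_STATUS( vxReleaseScalar( &min_distance ) );
// ERROR_CHECK_STATUS( vxReleaseScalar( &sensitivity ) );
// ERROR_CHECK_STATUS( vxReleaseScalar( &epsilon ) );
// ERROR_CHECK_STATUS( vxReleaseScalar( &num_iterations ) );
// ERROR_CHECK_STATUS( vxReleaseScalar( &use_initial_estimate ) );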
void vxRegisterHelperAsLogReader(vx_context context)
{
    vxInitLog(&helper_log);
    vxRegisterLogCallback(context, &vxHelperLogCallback, vx_false_e);
}
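// vxHelperLogCallback and helper_log come from the helper library; only the
// registration is shown above. A minimal sketch of a callback with the
// standard vx_log_callback_f signature from "VX/vx_types.h" (the printf
// format is illustrative):
static void VX_CALLBACK vxHelperLogCallback(vx_context context,
                                            vx_reference ref,
                                            vx_status status,
                                            const vx_char string[])
{
    // Echo every framework message along with its status code.
    printf("LOG: status=%d %s", status, string);
}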
int main(int argc, char* argv[])
{
    try
    {
        nvxio::Application &app = nvxio::Application::get();

        //
        // Parse command line arguments
        //

        std::string sourceUri = app.findSampleFilePath("cars.mp4");
        std::string configFile = app.findSampleFilePath("feature_tracker_demo_config.ini");

        app.setDescription("This demo demonstrates the Feature Tracker algorithm");
        app.addOption('s', "source", "Source URI", nvxio::OptionHandler::string(&sourceUri));
        app.addOption('c', "config", "Config file path", nvxio::OptionHandler::string(&configFile));

#if defined USE_OPENCV || defined USE_GSTREAMER
        std::string maskFile;
        app.addOption('m', "mask", "Optional mask", nvxio::OptionHandler::string(&maskFile));
#endif

        app.init(argc, argv);

        //
        // Create OpenVX context
        //

        nvxio::ContextGuard context;

        //
        // Read and check input parameters
        //

        nvx::FeatureTracker::HarrisPyrLKParams params;
        std::string error;
        if (!read(configFile, params, error))
        {
            std::cout << error;
            return nvxio::Application::APP_EXIT_CODE_INVALID_VALUE;
        }

        //
        // Create a Frame Source
        //

        std::unique_ptr<nvxio::FrameSource> source(
            nvxio::createDefaultFrameSource(context, sourceUri));

        if (!source || !source->open())
        {
            std::cerr << "Can't open source URI " << sourceUri << std::endl;
            return nvxio::Application::APP_EXIT_CODE_NO_RESOURCE;
        }

        if (source->getSourceType() == nvxio::FrameSource::SINGLE_IMAGE_SOURCE)
        {
            std::cerr << "Can't work on a single image." << std::endl;
            return nvxio::Application::APP_EXIT_CODE_INVALID_FORMAT;
        }

        nvxio::FrameSource::Parameters sourceParams = source->getConfiguration();

        //
        // Create a Render
        //

        std::unique_ptr<nvxio::Render> renderer(nvxio::createDefaultRender(
            context, "Feature Tracker Demo", sourceParams.frameWidth, sourceParams.frameHeight));

        if (!renderer)
        {
            std::cerr << "Can't create a renderer" << std::endl;
            return nvxio::Application::APP_EXIT_CODE_NO_RENDER;
        }

        EventData eventData;
        renderer->setOnKeyboardEventCallback(eventCallback, &eventData);

        //
        // Messages generated by the OpenVX framework will be processed by nvxio::stdoutLogCallback
        //

        vxRegisterLogCallback(context, &nvxio::stdoutLogCallback, vx_false_e);

        //
        // Create an OpenVX Image to hold frames from the video source
        //

        vx_image frameExemplar = vxCreateImage(context,
                                               sourceParams.frameWidth, sourceParams.frameHeight, sourceParams.format);
        NVXIO_CHECK_REFERENCE(frameExemplar);
        vx_delay frame_delay = vxCreateDelay(context, (vx_reference)frameExemplar, 2);
        NVXIO_CHECK_REFERENCE(frame_delay);
        vxReleaseImage(&frameExemplar);

        vx_image prevFrame = (vx_image)vxGetReferenceFromDelay(frame_delay, -1);
        vx_image frame = (vx_image)vxGetReferenceFromDelay(frame_delay, 0);

        //
        // Load the mask image if needed
        //

        vx_image mask = NULL;

#if defined USE_OPENCV || defined USE_GSTREAMER
        if (!maskFile.empty())
        {
            mask = nvxio::loadImageFromFile(context, maskFile, VX_DF_IMAGE_U8);

            vx_uint32 mask_width = 0, mask_height = 0;
            NVXIO_SAFE_CALL( vxQueryImage(mask, VX_IMAGE_ATTRIBUTE_WIDTH, &mask_width, sizeof(mask_width)) );
            NVXIO_SAFE_CALL( vxQueryImage(mask, VX_IMAGE_ATTRIBUTE_HEIGHT, &mask_height, sizeof(mask_height)) );

            if (mask_width != sourceParams.frameWidth || mask_height != sourceParams.frameHeight)
            {
                std::cerr << "The mask must have the same size as the input source." << std::endl;
                return nvxio::Application::APP_EXIT_CODE_INVALID_DIMENSIONS;
            }
        }
#endif

        //
        // Create FeatureTracker instance
        //

        std::unique_ptr<nvx::FeatureTracker> tracker(nvx::FeatureTracker::createHarrisPyrLK(context, params));

        nvxio::FrameSource::FrameStatus frameStatus;
        do
        {
            frameStatus = source->fetch(frame);
        } while (frameStatus == nvxio::FrameSource::TIMEOUT);

        if (frameStatus == nvxio::FrameSource::CLOSED)
        {
            std::cerr << "Source has no frames" << std::endl;
            return nvxio::Application::APP_EXIT_CODE_NO_FRAMESOURCE;
        }

        tracker->init(frame, mask);

        vxAgeDelay(frame_delay);

        //
        // Run processing loop
        //

        nvx::Timer totalTimer;
        totalTimer.tic();
        double proc_ms = 0;
        while (!eventData.shouldStop)
        {
            if (!eventData.pause)
            {
                frameStatus = source->fetch(frame);

                if (frameStatus == nvxio::FrameSource::TIMEOUT)
                {
                    continue;
                }
                if (frameStatus == nvxio::FrameSource::CLOSED)
                {
                    if (!source->open())
                    {
                        std::cerr << "Failed to reopen the source" << std::endl;
                        break;
                    }
                    continue;
                }

                //
                // Process
                //

                nvx::Timer procTimer;
                procTimer.tic();
                tracker->track(frame, mask);
                proc_ms = procTimer.toc();

                //
                // Print performance results
                //

                tracker->printPerfs();
            }

            //
            // Show the previous frame
            //

            renderer->putImage(prevFrame);

            //
            // Draw arrows & state
            //

            drawArrows(renderer.get(), tracker->getPrevFeatures(), tracker->getCurrFeatures());

            double total_ms = totalTimer.toc();
            std::cout << "Display Time : " << total_ms << " ms" << std::endl << std::endl;

            //
            // Add a delay to limit the frame rate
            //

            app.sleepToLimitFPS(total_ms);

            total_ms = totalTimer.toc();
            totalTimer.tic();

            displayState(renderer.get(), sourceParams, proc_ms, total_ms);

            if (!renderer->flush())
            {
                eventData.shouldStop = true;
            }

            if (!eventData.pause)
            {
                vxAgeDelay(frame_delay);
            }
        }

        //
        // Release all objects
        //

        vxReleaseImage(&mask);
        vxReleaseDelay(&frame_delay);
    }
    catch (const std::exception& e)
    {
        std::cerr << "Error: " << e.what() << std::endl;
        return nvxio::Application::APP_EXIT_CODE_ERROR;
    }

    return nvxio::Application::APP_EXIT_CODE_SUCCESS;
}
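// read() is implemented in the demo's config-parsing module and fills
// nvx::FeatureTracker::HarrisPyrLKParams from the .ini file. A minimal sketch,
// assuming a flat "key = value" format; pyr_levels and lk_num_iters are
// assumed field names used only for illustration, and other keys would be
// handled the same way.
#include <fstream>

static bool read(const std::string& configFile,
                 nvx::FeatureTracker::HarrisPyrLKParams& params,
                 std::string& error)
{
    std::ifstream file(configFile.c_str());
    if (!file.is_open())
    {
        error = "Can't open config file: " + configFile;
        return false;
    }

    std::string key;
    char eq = 0;
    while (file >> key >> eq)
    {
        if (eq != '=')
        {
            error = "Malformed line near '" + key + "'";
            return false;
        }
        if (key == "pyr_levels")        file >> params.pyr_levels;
        else if (key == "lk_num_iters") file >> params.lk_num_iters;
        else { std::string ignored; file >> ignored; } // skip unknown keys
    }
    return true;
}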
VX_API_ENTRY vx_status VX_API_CALL vxReleaseContext(vx_context *c)
{
    vx_status status = VX_SUCCESS;
    vx_context context = (c ? *c : 0);
    vx_uint32 r, m, a;
    vx_uint32 t;

    if (c) *c = 0;
    vxSemWait(&context_lock);
    if (vxIsValidContext(context) == vx_true_e)
    {
        if (vxDecrementReference(&context->base, VX_EXTERNAL) == 0)
        {
            vxDestroyThreadpool(&context->workers);
            context->proc.running = vx_false_e;
            vxPopQueue(&context->proc.input);
            vxJoinThread(context->proc.thread, NULL);
            vxDeinitQueue(&context->proc.output);
            vxDeinitQueue(&context->proc.input);

            /* Deregister any log callbacks if any are registered */
            vxRegisterLogCallback(context, NULL, vx_false_e);

            /*! \internal Garbage Collect All References */
            /* Details:
             * 1. This loop will warn of references which have not been released by the user.
             * 2. It will close all internally opened error references.
             * 3. It will close the external references, which in turn will internally
             *    close any internally dependent references that they reference, assuming the
             *    reference counting has been done properly in the framework.
             * 4. This garbage collection must be done before the targets are released since some of
             *    these external references may have internal references to target kernels.
             */
            for (r = 0; r < VX_INT_MAX_REF; r++)
            {
                vx_reference_t *ref = context->reftable[r];

                /* Warnings should only come when users have not released all external references */
                if (ref && ref->external_count > 0)
                {
                    VX_PRINT(VX_ZONE_WARNING, "Stale reference "VX_FMT_REF" of type %08x at external count %u, internal count %u\n",
                             ref, ref->type, ref->external_count, ref->internal_count);
                }

                /* These were internally opened during creation, so should internally close ERRORs */
                if (ref && ref->type == VX_TYPE_ERROR)
                {
                    vxReleaseReferenceInt(&ref, ref->type, VX_INTERNAL, NULL);
                }

                /* Warned above so the user can fix and release external objects, but close them here anyway */
                while (ref && ref->external_count > 1)
                {
                    vxDecrementReference(ref, VX_EXTERNAL);
                }
                if (ref && ref->external_count > 0)
                {
                    vxReleaseReferenceInt(&ref, ref->type, VX_EXTERNAL, NULL);
                }
            }

            for (m = 0; m < context->num_modules; m++)
            {
                if (context->modules[m].handle)
                {
                    vxUnloadModule(context->modules[m].handle);
                    memset(context->modules[m].name, 0, sizeof(context->modules[m].name));
                    context->modules[m].handle = VX_MODULE_INIT;
                }
            }

            /* de-initialize and unload each target */
            for (t = 0u; t < context->num_targets; t++)
            {
                if (context->targets[t].enabled == vx_true_e)
                {
                    context->targets[t].funcs.deinit(&context->targets[t]);
                    vxUnloadTarget(context, t, vx_true_e);
                    context->targets[t].enabled = vx_false_e;
                }
            }

            /* Remove all outstanding accessors. */
            for (a = 0; a < dimof(context->accessors); ++a)
                if (context->accessors[a].used)
                    vxRemoveAccessor(context, a);

            /* Check for outstanding mappings */
            for (a = 0; a < dimof(context->memory_maps); ++a)
            {
                if (context->memory_maps[a].used)
                {
                    VX_PRINT(VX_ZONE_ERROR, "Memory map %d not unmapped\n", a);
                    vxMemoryUnmap(context, a);
                }
            }
            vxDestroySem(&context->memory_maps_lock);

            /* By now, all external and internal references should be removed */
            for (r = 0; r < VX_INT_MAX_REF; r++)
            {
                if (context->reftable[r])
                    VX_PRINT(VX_ZONE_ERROR, "Reference %d not removed\n", r);
            }

#ifdef EXPERIMENTAL_USE_HEXAGON
            remote_handle_close(tmp_ph);
#endif

            /*! \internal wipe away the context memory first */
            /* Normally destroying the sem is part of releasing the reference, but that can't be done for the context */
            vxDestroySem(&((vx_reference)context)->lock);
            memset(context, 0, sizeof(vx_context_t));
            free((void *)context);
            vxDestroySem(&global_lock);
            vxSemPost(&context_lock);
            vxDestroySem(&context_lock);
            single_context = NULL;
            return status;
        }
        else
        {
            VX_PRINT(VX_ZONE_WARNING, "Context still has %u holders\n", vxTotalReferenceCount(&context->base));
        }
    }
    else
    {
        status = VX_ERROR_INVALID_REFERENCE;
    }
    vxSemPost(&context_lock);
    return status;
}
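/* Usage sketch: because vxReleaseContext() nulls the caller's handle up front,
 * a second release on the same variable is caught by the vxIsValidContext()
 * check above and simply returns VX_ERROR_INVALID_REFERENCE instead of
 * double-freeing (the function name below is illustrative):
 */
void release_context_example(void)
{
    vx_context context = vxCreateContext();
    /* ... create graphs, run them, release the other objects ... */
    vx_status status = vxReleaseContext(&context); /* context is NULL afterwards */
    status = vxReleaseContext(&context);           /* VX_ERROR_INVALID_REFERENCE */
    (void)status;
}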
int main(int argc, char **argv)
{
    int i;
    vx_status status;

    vx_set_debug_zone(VX_ZONE_ERROR);
    //vx_set_debug_zone(VX_ZONE_WARNING);
    //vx_set_debug_zone(VX_ZONE_INFO);

    vx_context context = vxCreateContext();
    CHECK_NOT_NULL(context, "vxCreateContext");
    printf("Successfully created vx_context!!\n\n");

    vxInitLog(&helper_log);
    vxRegisterLogCallback(context, &vxHelperLogCallback, vx_false_e);

    Mat src = imread(SRC_IMG_NAME);
    CHECK_NOT_NULL(src.data, "imread");
    resize(src, src, Size(IMG_WIDTH, IMG_HEIGHT));
    cvtColor(src, src, CV_RGB2GRAY);

    for (i = 0; i < 1; i++)
    {
        Mat result_cv(IMG_HEIGHT, IMG_WIDTH, CV_8UC1);
        Mat result_vx(IMG_HEIGHT, IMG_WIDTH, CV_8UC1);

        printf("Start to run not_box3x3_graph()\n");
        not_box3x3_cv(src.clone(), result_cv);
        status = not_box3x3_graph(context, src.clone(), result_vx);
        printf("Return from not_box3x3_graph() result_vx: %d\n", status);
        if (verify_result(result_cv, result_vx))
            printf("Verify passed!!\n");
        else
            printf("Verify failed!!\n");
        printf("\n");
        //imwrite("not_box3x3_cv.jpg", result_cv);
        //imwrite("not_box3x3_vx.jpg", result_vx);

        printf("Start to run not_not_graph()\n");
        not_not_cv(src.clone(), result_cv);
        status = not_not_graph(context, src.clone(), result_vx);
        printf("Return from not_not_graph() result_vx: %d\n", status);
        if (verify_result(result_cv, result_vx))
            printf("Verify passed!!\n");
        else
            printf("Verify failed!!\n");
        printf("\n");

        printf("Start to run not_graph()\n");
        not_cv(src.clone(), result_cv);
        status = not_graph(context, src.clone(), result_vx);
        printf("Return from not_graph() result_vx: %d\n", status);
        if (verify_result(result_cv, result_vx))
            printf("Verify passed!!\n");
        else
            printf("Verify failed!!\n");
        printf("\n");
        //imwrite("result_cv.jpg", result_cv);
        //imwrite("result_vx.jpg", result_vx);
    }

    status = vxReleaseContext(&context);
    CHECK_STATUS(status, "vxReleaseContext");

    printf("%s done!!\n", argv[0]);
    return 0;
}
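// verify_result() is defined elsewhere in this test. A minimal sketch that
// accepts the OpenVX output only when it matches the OpenCV reference pixel
// for pixel (both images are single-channel CV_8UC1 here):
static bool verify_result(const Mat& result_cv, const Mat& result_vx)
{
    if (result_cv.size() != result_vx.size() || result_cv.type() != result_vx.type())
        return false;
    Mat diff;
    absdiff(result_cv, result_vx, diff);
    return countNonZero(diff) == 0;
}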