static vx_status vxCheckBufferKernel(vx_node node, vx_reference *parameters, vx_uint32 num)
{
    vx_status status = VX_SUCCESS;
    if (num == 3)
    {
        vx_buffer buffer = (vx_buffer)parameters[0];
        vx_scalar fill = (vx_scalar)parameters[1];
        vx_scalar errs = (vx_scalar)parameters[2];
        vx_uint8 value = 0u;
        vx_size numUnits = 0ul, unitSize = 0ul, size = 0ul;
        vx_uint32 errors = 0;
        vx_uint8 *ptr = NULL;
        vxQueryBuffer(buffer, VX_BUFFER_ATTRIBUTE_NUMUNITS, &numUnits, sizeof(numUnits));
        vxQueryBuffer(buffer, VX_BUFFER_ATTRIBUTE_UNITSIZE, &unitSize, sizeof(unitSize));
        vxQueryBuffer(buffer, VX_BUFFER_ATTRIBUTE_SIZE, &size, sizeof(size));
        vxAccessScalarValue(fill, (void *)&value);
        status = vxAccessBufferRange(buffer, 0, numUnits, (void **)&ptr);
        if (status == VX_SUCCESS)
        {
            vx_size i = 0;
            for (i = 0ul; i < size; i++)
            {
                if (ptr[i] != value)
                {
                    errors++;
                }
            }
            vxCommitScalarValue(errs, &errors);
            if (errors > 0)
            {
                vxAddLogEntry(vxGetContext((vx_reference)node), VX_FAILURE,
                              "Check buffer %p of "VX_FMT_SIZE" bytes with 0x%02x, found %u errors\n",
                              ptr, size, value, errors);
            }
            status = vxCommitBufferRange(buffer, 0, numUnits, ptr);
            if (status != VX_SUCCESS)
            {
                vxAddLogEntry(vxGetContext((vx_reference)node), VX_FAILURE,
                              "Failed to set buffer range for "VX_FMT_REF"\n", buffer);
            }
        }
        else
        {
            vxAddLogEntry(vxGetContext((vx_reference)node), VX_FAILURE,
                          "Failed to get buffer range for "VX_FMT_REF"\n", buffer);
        }
        if (errors > 0)
        {
            status = VX_FAILURE;
        }
    }
    return status;
}
VX_API_ENTRY vx_node VX_API_CALL vxArgmaxLayer(vx_graph graph, vx_tensor input, vx_reference output)
{
    vx_node node = NULL;
    vx_context context = vxGetContext((vx_reference)graph);
    if (vxGetStatus((vx_reference)context) == VX_SUCCESS)
    {
        vx_reference params[] = {
            (vx_reference)input,
            (vx_reference)output
        };
        node = createNode(graph, VX_KERNEL_ARGMAX_LAYER_AMD, params, sizeof(params) / sizeof(params[0]));
    }
    return node;
}
//! [node]
vx_node vxXYZNode(vx_graph graph, vx_image input, vx_uint32 value, vx_image output, vx_array temp)
{
    vx_uint32 i;
    vx_node node = 0;
    vx_context context = vxGetContext((vx_reference)graph);
    vx_status status = vxLoadKernels(context, "xyz");
    if (status == VX_SUCCESS)
    {
        //! [xyz node]
        vx_kernel kernel = vxGetKernelByName(context, VX_KERNEL_NAME_KHR_XYZ);
        if (kernel)
        {
            node = vxCreateGenericNode(graph, kernel);
            if (vxGetStatus((vx_reference)node) == VX_SUCCESS)
            {
                vx_status statuses[4];
                vx_scalar scalar = vxCreateScalar(context, VX_TYPE_INT32, &value);
                statuses[0] = vxSetParameterByIndex(node, 0, (vx_reference)input);
                statuses[1] = vxSetParameterByIndex(node, 1, (vx_reference)scalar);
                statuses[2] = vxSetParameterByIndex(node, 2, (vx_reference)output);
                statuses[3] = vxSetParameterByIndex(node, 3, (vx_reference)temp);
                vxReleaseScalar(&scalar);
                for (i = 0; i < dimof(statuses); i++)
                {
                    if (statuses[i] != VX_SUCCESS)
                    {
                        status = VX_ERROR_INVALID_PARAMETERS;
                        vxReleaseNode(&node);
                        vxReleaseKernel(&kernel);
                        node = 0;
                        kernel = 0;
                        break;
                    }
                }
            }
            else
            {
                vxReleaseKernel(&kernel);
            }
        }
        else
        {
            vxUnloadKernels(context, "xyz");
        }
        //! [xyz node]
    }
    return node;
}
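////////
// A minimal usage sketch for vxXYZNode (not from the original source): the
// image sizes, scalar value, and the vx_array item type/capacity below are
// illustrative assumptions, and intermediate error checks are trimmed.
vx_status runXYZGraphExample(void)
{
    vx_status status = VX_FAILURE;
    vx_context context = vxCreateContext();
    if (vxGetStatus((vx_reference)context) == VX_SUCCESS)
    {
        vx_graph graph = vxCreateGraph(context);
        vx_image input = vxCreateImage(context, 640, 480, VX_DF_IMAGE_U8);
        vx_image output = vxCreateImage(context, 640, 480, VX_DF_IMAGE_U8);
        vx_array temp = vxCreateArray(context, VX_TYPE_UINT8, 1024); // assumed item type/capacity
        vx_node node = vxXYZNode(graph, input, 10, output, temp);
        if (node && (vxVerifyGraph(graph) == VX_SUCCESS))
        {
            status = vxProcessGraph(graph);
        }
        vxReleaseNode(&node);
        vxReleaseArray(&temp);
        vxReleaseImage(&output);
        vxReleaseImage(&input);
        vxReleaseGraph(&graph);
        vxReleaseContext(&context);
    }
    return status;
}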
////////
// The node creation interface for the "app.userkernels.tensor_cos" kernel.
// This user kernel example expects parameters in the following order:
//   parameter #0 -- input tensor of format VX_TYPE_INT16
//   parameter #1 -- output tensor of format VX_TYPE_INT16
//
// TODO STEP 01:********
//   1. Use the vxGetKernelByEnum API to get a kernel object from USER_KERNEL_TENSOR_COS.
//      Note that you need to use the vxGetContext API to get the context from a graph object.
//   2. Use the vxCreateGenericNode API to create a node from the kernel object.
//   3. Use the vxSetParameterByIndex API to set the node arguments.
//   4. Release the kernel object once it is no longer needed.
//   5. Use the ERROR_CHECK_OBJECT and ERROR_CHECK_STATUS macros for error detection.
vx_node userTensorCosNode( vx_graph graph,
                           vx_tensor input,
                           vx_tensor output )
{
    vx_context context = vxGetContext( ( vx_reference ) graph );
    vx_kernel kernel   = vxGetKernelByEnum( context, USER_KERNEL_TENSOR_COS );
    ERROR_CHECK_OBJECT( kernel );
    vx_node node       = vxCreateGenericNode( graph, kernel );
    ERROR_CHECK_OBJECT( node );
    ERROR_CHECK_STATUS( vxSetParameterByIndex( node, 0, ( vx_reference ) input ) );
    ERROR_CHECK_STATUS( vxSetParameterByIndex( node, 1, ( vx_reference ) output ) );
    ERROR_CHECK_STATUS( vxReleaseKernel( &kernel ) );
    return node;
}
vx_node vxCreateNodeByStructure(vx_graph graph,
                                vx_enum kernelenum,
                                vx_parameter_item_t *params,
                                vx_uint32 num)
{
    vx_status status = VX_SUCCESS;
    vx_node node = 0;
    vx_context context = vxGetContext((vx_reference)graph);
    vx_kernel kernel = vxGetKernelByEnum(context, kernelenum);
    if (kernel)
    {
        node = vxCreateNode(graph, kernel);
        if (node)
        {
            vx_uint32 p = 0;
            for (p = 0; p < num; p++)
            {
                status = vxSetParameterByIndex(node, p, params[p].direction, params[p].reference);
                if (status != VX_SUCCESS)
                {
                    vxAddLogEntry((vx_reference)graph, status, "Kernel %d Parameter %u is invalid.\n", kernelenum, p);
                    vxReleaseNode(&node);
                    node = 0;
                    break;
                }
            }
        }
        else
        {
            vxAddLogEntry((vx_reference)graph, VX_ERROR_INVALID_PARAMETERS, "Failed to create node with kernel enum %d\n", kernelenum);
            status = VX_ERROR_NO_MEMORY;
        }
        vxReleaseKernel(&kernel);
    }
    else
    {
        vxAddLogEntry((vx_reference)graph, VX_ERROR_INVALID_PARAMETERS, "Failed to retrieve kernel enum %d\n", kernelenum);
        status = VX_ERROR_NOT_SUPPORTED;
    }
    return node;
}
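////////
// A hedged sketch of how a typed node creator can wrap vxCreateNodeByStructure.
// VX_KERNEL_EXAMPLE_FOO is a hypothetical kernel enum (one input image, one
// output image); dimof is the array-count macro used elsewhere in this code,
// and designated initializers are used to avoid assuming the field order of
// vx_parameter_item_t.
vx_node vxExampleFooNode(vx_graph graph, vx_image input, vx_image output)
{
    vx_parameter_item_t params[] = {
        { .direction = VX_INPUT,  .reference = (vx_reference)input  }, // parameter #0: source image
        { .direction = VX_OUTPUT, .reference = (vx_reference)output }, // parameter #1: destination image
    };
    return vxCreateNodeByStructure(graph, VX_KERNEL_EXAMPLE_FOO, // hypothetical enum
                                   params, dimof(params));
}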
VX_API_ENTRY vx_node VX_API_CALL vxTensorAddNode(vx_graph graph, vx_tensor input1, vx_tensor input2, vx_enum policy, vx_tensor output)
{
    vx_node node = NULL;
    vx_context context = vxGetContext((vx_reference)graph);
    if (vxGetStatus((vx_reference)context) == VX_SUCCESS)
    {
        vx_scalar s_policy = vxCreateScalarWithSize(context, VX_TYPE_ENUM, &policy, sizeof(policy));
        if (vxGetStatus((vx_reference)s_policy) == VX_SUCCESS)
        {
            vx_reference params[] = {
                (vx_reference)input1,
                (vx_reference)input2,
                (vx_reference)s_policy,
                (vx_reference)output
            };
            node = createNode(graph, VX_KERNEL_TENSOR_ADD, params, sizeof(params) / sizeof(params[0]));
            vxReleaseScalar(&s_policy);
        }
    }
    return node;
}
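////////
// A brief usage sketch for vxTensorAddNode (not from the original source):
// the 2-D INT16 tensor shape and the Q7.8 fixed-point position are
// illustrative assumptions, and intermediate error checks are trimmed.
vx_status runTensorAddExample(vx_context context)
{
    vx_status status = VX_FAILURE;
    vx_size dims[2] = { 64, 64 };
    vx_graph graph = vxCreateGraph(context);
    vx_tensor in1 = vxCreateTensor(context, 2, dims, VX_TYPE_INT16, 8);
    vx_tensor in2 = vxCreateTensor(context, 2, dims, VX_TYPE_INT16, 8);
    vx_tensor out = vxCreateTensor(context, 2, dims, VX_TYPE_INT16, 8);
    vx_node node = vxTensorAddNode(graph, in1, in2, VX_CONVERT_POLICY_SATURATE, out);
    if (node && (vxVerifyGraph(graph) == VX_SUCCESS))
    {
        status = vxProcessGraph(graph);
    }
    vxReleaseNode(&node);
    vxReleaseTensor(&out);
    vxReleaseTensor(&in2);
    vxReleaseTensor(&in1);
    vxReleaseGraph(&graph);
    return status;
}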
VX_API_ENTRY vx_node VX_API_CALL vxROIPoolingLayer(vx_graph graph, vx_tensor input_data, vx_tensor input_rois,
                                                   const vx_nn_roi_pool_params_t *roi_pool_params,
                                                   vx_size size_of_roi_params, vx_tensor output_arr)
{
    vx_node node = NULL;
    vx_context context = vxGetContext((vx_reference)graph);
    if (vxGetStatus((vx_reference)context) == VX_SUCCESS)
    {
        vx_scalar roi_params = vxCreateScalarWithSize(context, VX_TYPE_NN_ROI_POOL_PARAMS, roi_pool_params, size_of_roi_params);
        if (vxGetStatus((vx_reference)roi_params) == VX_SUCCESS)
        {
            vx_reference params[] = {
                (vx_reference)input_data,
                (vx_reference)input_rois,
                (vx_reference)roi_params,
                (vx_reference)output_arr
            };
            node = createNode(graph, VX_KERNEL_ROI_POOLING_LAYER, params, sizeof(params) / sizeof(params[0]));
            vxReleaseScalar(&roi_params);
        }
    }
    return node;
}
vx_status vxGetLogEntry(vx_reference r, char message[VX_MAX_LOG_MESSAGE_LEN])
{
    vx_status status = VX_SUCCESS;
    vx_int32 cur = 0;
    vx_bool isContext = (vxGetContext(r) == 0 ? vx_true_e : vx_false_e);
    // if there's nothing in the helper_log, return success
    if (helper_log.first == -1)
    {
        return VX_SUCCESS;
    }
    // Walk the ring buffer looking for an active entry that matches the
    // reference r (or any active entry, when r is a context). When one is
    // found, mark it inactive, copy it out, and return its status.
    for (cur = helper_log.first; cur != helper_log.last; cur = (cur + 1) % helper_log.count)
    {
        // match on the reference, or accept any entry when a context was given
        if (((isContext == vx_true_e) || (r == helper_log.entries[cur].reference)) &&
            (helper_log.entries[cur].active == vx_true_e))
        {
            status = helper_log.entries[cur].status;
            strncpy(message, helper_log.entries[cur].message, VX_MAX_LOG_MESSAGE_LEN);
            helper_log.entries[cur].active = vx_false_e;
            if (cur == helper_log.first)
            {
                // aged out the first entry
                helper_log.first = (helper_log.first + 1) % helper_log.count;
                if (helper_log.first == helper_log.last)
                {
                    helper_log.first = -1; // the log is now empty
                }
            }
            break;
        }
    }
    return status;
}
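////////
// A small sketch showing how vxGetLogEntry can be drained (not from the
// original source; assumes <stdio.h>). It also assumes only failure statuses
// are ever logged, since an entry whose stored status is VX_SUCCESS is
// indistinguishable from an empty log.
void printPendingLogEntries(vx_context context)
{
    char message[VX_MAX_LOG_MESSAGE_LEN];
    // passing the context matches every entry, per the isContext check above
    while (vxGetLogEntry((vx_reference)context, message) != VX_SUCCESS)
    {
        printf("%s", message);
    }
}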
VX_API_ENTRY vx_node VX_API_CALL vxConvertImageToTensorNode(vx_graph graph, vx_image input, vx_tensor output, vx_float32 a, vx_float32 b, vx_bool reverse_channel_order)
{
    vx_node node = NULL;
    vx_context context = vxGetContext((vx_reference)graph);
    if (vxGetStatus((vx_reference)context) == VX_SUCCESS)
    {
        vx_scalar s_a = vxCreateScalarWithSize(context, VX_TYPE_FLOAT32, &a, sizeof(a));
        vx_scalar s_b = vxCreateScalarWithSize(context, VX_TYPE_FLOAT32, &b, sizeof(b));
        vx_scalar s_order = vxCreateScalarWithSize(context, VX_TYPE_BOOL, &reverse_channel_order, sizeof(reverse_channel_order));
        if (vxGetStatus((vx_reference)s_a) == VX_SUCCESS &&
            vxGetStatus((vx_reference)s_b) == VX_SUCCESS &&
            vxGetStatus((vx_reference)s_order) == VX_SUCCESS)
        {
            vx_reference params[] = {
                (vx_reference)input,
                (vx_reference)output,
                (vx_reference)s_a,
                (vx_reference)s_b,
                (vx_reference)s_order
            };
            node = createNode(graph, VX_KERNEL_CONVERT_IMAGE_TO_TENSOR_AMD, params, sizeof(params) / sizeof(params[0]));
            vxReleaseScalar(&s_a);
            vxReleaseScalar(&s_b);
            vxReleaseScalar(&s_order);
        }
    }
    return node;
}
static vx_status vxEqualizeHistKernel(vx_node node, vx_reference *parameters, vx_uint32 num)
{
    vx_status status = VX_FAILURE;
    if (num == 2)
    {
        vx_image src = (vx_image)parameters[0];
        vx_image dst = (vx_image)parameters[1];
        vx_uint32 y, x, width = 0, height = 0;
        void *src_base = NULL;
        void *dst_base = NULL;
        vx_imagepatch_addressing_t src_addr, dst_addr;
        vx_rectangle rect;
        status = VX_SUCCESS;
        status |= vxQueryImage(src, VX_IMAGE_ATTRIBUTE_WIDTH, &width, sizeof(width));
        status |= vxQueryImage(src, VX_IMAGE_ATTRIBUTE_HEIGHT, &height, sizeof(height));
        rect = vxCreateRectangle(vxGetContext((vx_reference)node), 0, 0, width, height);
        status |= vxAccessImagePatch(src, rect, 0, &src_addr, &src_base);
        status |= vxAccessImagePatch(dst, rect, 0, &dst_addr, &dst_base);
        if (status == VX_SUCCESS)
        {
            /* for 16-bit support (U16 or S16), the code can be duplicated
             * with NUM_BINS = 65536 and PIXEL = vx_uint16. */
#define NUM_BINS 256
            /* fixed-size temp arrays for the image histogram and cumulative distribution */
            vx_uint32 hist[NUM_BINS];
            vx_uint32 cdf[NUM_BINS];
            vx_uint32 sum = 0;
            vx_uint32 maxVal = 0;
            vx_float32 scaleFactor = 0.0f;

            /* calculate the distribution (histogram) */
            memset(hist, 0, sizeof(hist));
            for (y = 0; y < height; y++)
            {
                for (x = 0; x < width; x++)
                {
                    vx_uint8 *src_ptr = vxFormatImagePatchAddress2d(src_base, x, y, &src_addr);
                    vx_uint8 pixel = *src_ptr;
                    hist[pixel]++;
                }
            }
            /* calculate the cumulative distribution (summed histogram) */
            for (x = 0; x < NUM_BINS; x++)
            {
                cdf[x] = sum;
                sum += hist[x];
            }
            /* find the scale factor from the max cdf value */
            maxVal = cdf[0];
            for (x = 1; x < NUM_BINS; x++)
            {
                if (maxVal < cdf[x])
                {
                    maxVal = cdf[x];
                }
            }
            scaleFactor = 255.0f / (float)maxVal;
            //printf("* maxVal = %d, scaleFactor = %f\n", maxVal, scaleFactor);
            /* map the src pixel values to the equalized pixel values */
            for (y = 0; y < height; y++)
            {
                for (x = 0; x < width; x++)
                {
                    vx_uint8 *src_ptr = vxFormatImagePatchAddress2d(src_base, x, y, &src_addr);
                    vx_uint8 *dst_ptr = vxFormatImagePatchAddress2d(dst_base, x, y, &dst_addr);
                    vx_uint32 equalized_int = cdf[(*src_ptr)];
                    *dst_ptr = (vx_uint8)(equalized_int * scaleFactor + 0.5f);
                }
            }
        }
        status |= vxCommitImagePatch(src, 0, 0, &src_addr, src_base); /* read-only access: commit no region */
        status |= vxCommitImagePatch(dst, rect, 0, &dst_addr, dst_base);
        vxReleaseRectangle(&rect);
    }
    return status;
}
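////////
// For reference, the same equalization mapping on a plain byte buffer,
// stripped of the OpenVX patch handling above (a sketch, not from the
// original source): dst = round(cdf[src] * 255 / max(cdf)), where cdf is the
// exclusive prefix sum of the histogram.
static void equalizeBytes(const vx_uint8 *src, vx_uint8 *dst, vx_size count)
{
    vx_uint32 hist[256] = { 0 }, cdf[256], sum = 0, maxVal;
    vx_uint32 b;
    vx_size i;
    for (i = 0; i < count; i++)      /* histogram */
        hist[src[i]]++;
    for (b = 0; b < 256; b++)        /* exclusive cumulative distribution */
    {
        cdf[b] = sum;
        sum += hist[b];
    }
    maxVal = cdf[0];                 /* scale by the largest cdf value */
    for (b = 1; b < 256; b++)
        if (cdf[b] > maxVal)
            maxVal = cdf[b];
    if (maxVal == 0)                 /* guard the constant-255 corner case */
        maxVal = 1;
    for (i = 0; i < count; i++)      /* remap with rounding */
        dst[i] = (vx_uint8)(cdf[src[i]] * (255.0f / (vx_float32)maxVal) + 0.5f);
}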
static vx_status vxCheckImageKernel(vx_node node, vx_reference *parameters, vx_uint32 num)
{
    vx_status status = VX_SUCCESS;
    if (num == 3)
    {
        vx_image image = (vx_image)parameters[0];
        vx_scalar fill = (vx_scalar)parameters[1];
        vx_scalar errs = (vx_scalar)parameters[2];
        packed_value_u value;
        vx_uint32 planes = 0u, count = 0u, errors = 0u;
        vx_uint32 x = 0u, y = 0u, p = 0u;
        vx_int32 i = 0;
        vx_imagepatch_addressing_t addr;
        vx_rectangle rect;
        value.dword[0] = 0xDEADBEEF;
        vxAccessScalarValue(fill, &value.dword[0]);
        vxQueryImage(image, VX_IMAGE_ATTRIBUTE_PLANES, &planes, sizeof(planes));
        rect = vxGetValidRegionImage(image);
        for (p = 0u; (p < planes) && (rect); p++)
        {
            void *ptr = NULL;
            status = vxAccessImagePatch(image, rect, p, &addr, &ptr);
            if ((status == VX_SUCCESS) && (ptr))
            {
                for (y = 0; y < addr.dim_y; y += addr.step_y)
                {
                    for (x = 0; x < addr.dim_x; x += addr.step_x)
                    {
                        vx_uint8 *pixel = vxFormatImagePatchAddress2d(ptr, x, y, &addr);
                        for (i = 0; i < addr.stride_x; i++)
                        {
                            count++;
                            if (pixel[i] != value.bytes[i])
                            {
                                errors++;
                            }
                        }
                    }
                }
                if (errors > 0)
                {
                    vxAddLogEntry(vxGetContext((vx_reference)node), VX_FAILURE,
                                  "Checked %p of %u sub-pixels with 0x%08x with %u errors\n",
                                  ptr, count, value.dword[0], errors);
                }
                vxCommitScalarValue(errs, &errors);
                status = vxCommitImagePatch(image, 0, p, &addr, ptr);
                if (status != VX_SUCCESS)
                {
                    vxAddLogEntry(vxGetContext((vx_reference)node), VX_FAILURE,
                                  "Failed to set image patch for "VX_FMT_REF"\n", image);
                }
            }
            else
            {
                vxAddLogEntry(vxGetContext((vx_reference)node), VX_FAILURE,
                              "Failed to get image patch for "VX_FMT_REF"\n", image);
            }
        }
        vxReleaseRectangle(&rect);
        if (errors > 0)
        {
            status = VX_FAILURE;
        }
    }
    return status;
}
static vx_status VX_CALLBACK vxHarrisInitializer(vx_node node, vx_reference parameters[], vx_uint32 num)
{
    vx_status status = VX_FAILURE;
    if (num == dimof(harris_kernel_params))
    {
        vx_image src = (vx_image)parameters[0];
        vx_scalar str = (vx_scalar)parameters[1];
        vx_scalar min = (vx_scalar)parameters[2];
        vx_scalar sen = (vx_scalar)parameters[3];
        vx_scalar win = (vx_scalar)parameters[4];
        vx_scalar blk = (vx_scalar)parameters[5];
        vx_array arr = (vx_array)parameters[6];
        vx_scalar num_corners = (vx_scalar)parameters[7];
        vx_context c = vxGetContext((vx_reference)node);
        vx_graph g = vxCreateGraph(c);
        vxLoadKernels(c, "openvx-extras");
        vxLoadKernels(c, "openvx-debug");
        if (g)
        {
            vx_uint32 i = 0;
            vx_int32 ds = 4;
            vx_scalar shift = vxCreateScalar(c, VX_TYPE_INT32, &ds);
            vx_image virts[] = {
                vxCreateVirtualImage(g, 0, 0, VX_DF_IMAGE_VIRT), // Gx
                vxCreateVirtualImage(g, 0, 0, VX_DF_IMAGE_VIRT), // Gy
                vxCreateVirtualImage(g, 0, 0, VX_DF_IMAGE_VIRT), // Score
                vxCreateVirtualImage(g, 0, 0, VX_DF_IMAGE_VIRT), // Suppressed
                vxCreateVirtualImage(g, 0, 0, VX_DF_IMAGE_U8),   // Shifted Suppressed Log10
            };
            vx_node nodes[] = {
                vxSobelMxNNode(g, src, win, virts[0], virts[1]),
                vxHarrisScoreNode(g, virts[0], virts[1], sen, blk, virts[2]),
                vxEuclideanNonMaxNode(g, virts[2], str, min, virts[3]),
                vxImageListerNode(g, virts[3], arr, num_corners),
#if defined(OPENVX_DEBUGGING)
                vxConvertDepthNode(g, virts[3], virts[4], VX_CONVERT_POLICY_WRAP, shift),
                vxFWriteImageNode(g, virts[4], "oharris_strength_power_up4.pgm"),
#endif
            };
            status = VX_SUCCESS;
            status |= vxAddParameterToGraphByIndex(g, nodes[0], 0); // src
            status |= vxAddParameterToGraphByIndex(g, nodes[2], 1); // str
            status |= vxAddParameterToGraphByIndex(g, nodes[2], 2); // min
            status |= vxAddParameterToGraphByIndex(g, nodes[1], 2); // sen
            status |= vxAddParameterToGraphByIndex(g, nodes[0], 1); // win
            status |= vxAddParameterToGraphByIndex(g, nodes[1], 3); // blk
            status |= vxAddParameterToGraphByIndex(g, nodes[3], 1); // arr
            status |= vxAddParameterToGraphByIndex(g, nodes[3], 2); // num_corners
            for (i = 0; i < dimof(nodes); i++)
            {
                vxReleaseNode(&nodes[i]);
            }
            for (i = 0; i < dimof(virts); i++)
            {
                vxReleaseImage(&virts[i]);
            }
            vxReleaseScalar(&shift);
            status |= vxVerifyGraph(g);
            VX_PRINT(VX_ZONE_INFO, "Status from Child Graph = %d\n", status);
            if (status == VX_SUCCESS)
            {
                status = vxSetChildGraphOfNode(node, g);
            }
            else
            {
                vxReleaseGraph(&g);
            }
        }
    }
    return status;
}
static vx_status VX_CALLBACK vxHalfscaleGaussianInitializer(vx_node node, const vx_reference *parameters, vx_uint32 num)
{
    vx_status status = VX_ERROR_INVALID_PARAMETERS;
    if (num == 3)
    {
        vx_image input = (vx_image)parameters[0];
        vx_image output = (vx_image)parameters[1];
        vx_int32 kernel_size = 3;
        vx_convolution convolution = 0;
        vx_context context = vxGetContext((vx_reference)node);
        vx_graph graph = vxCreateGraph(context);
        if (vxGetStatus((vx_reference)graph) == VX_SUCCESS)
        {
            vx_uint32 i;
            /* We have a child graph; we want to make sure the parent graph
             * is recognized as a valid scope for the sake of virtual image
             * parameters. */
            graph->parentGraph = node->graph;
            vxReadScalarValue((vx_scalar)parameters[2], &kernel_size);
            if (kernel_size == 3 || kernel_size == 5)
            {
                if (kernel_size == 5)
                {
                    convolution = vxCreateGaussian5x5Convolution(context);
                }
                if (kernel_size == 3 || convolution)
                {
                    vx_image virt = vxCreateVirtualImage(graph, 0, 0, VX_DF_IMAGE_U8);
                    vx_node nodes[] = {
                        kernel_size == 3 ? vxGaussian3x3Node(graph, input, virt)
                                         : vxConvolveNode(graph, input, convolution, virt),
                        vxScaleImageNode(graph, virt, output, VX_INTERPOLATION_TYPE_NEAREST_NEIGHBOR),
                    };
                    vx_border_mode_t borders;
                    vxQueryNode(node, VX_NODE_ATTRIBUTE_BORDER_MODE, &borders, sizeof(borders));
                    for (i = 0; i < dimof(nodes); i++)
                    {
                        vxSetNodeAttribute(nodes[i], VX_NODE_ATTRIBUTE_BORDER_MODE, &borders, sizeof(borders));
                    }
                    status = VX_SUCCESS;
                    status |= vxAddParameterToGraphByIndex(graph, nodes[0], 0); /* input image */
                    status |= vxAddParameterToGraphByIndex(graph, nodes[1], 1); /* output image */
                    status |= vxAddParameterToGraphByIndex(graph, node, 2);     /* gradient size - refer to self to quiet the sub-graph validator */
                    status |= vxVerifyGraph(graph);
                    /* release our references; the graph will hold its own */
                    for (i = 0; i < dimof(nodes); i++)
                    {
                        vxReleaseNode(&nodes[i]);
                    }
                    if (convolution)
                        vxReleaseConvolution(&convolution);
                    vxReleaseImage(&virt);
                    status |= vxSetChildGraphOfNode(node, graph);
                }
            }
            vxReleaseGraph(&graph);
        }
    }
    return status;
}