/*! \brief The destructor to remove a user loaded module from OpenVX.
 * \param [in] context The handle to the implementation context.
 * \return A \ref vx_status_e enumeration. Returns errors if some or all kernels were not added
 * correctly.
 * \note This follows the function pointer definition of a \ref vx_unpublish_kernels_f
 * and uses the predefined name for the entry point, "vxUnpublishKernels".
 * \ingroup group_example_kernel
 */
/*VX_API_ENTRY*/ vx_status VX_API_CALL vxUnpublishKernels(vx_context context)
{
    vx_status status = VX_FAILURE;
    vx_uint32 k = 0;
    /* Walk the module's kernel table, releasing and removing each kernel that
     * was previously published. Note: 'status' reflects only the LAST kernel
     * processed, so an early failure can be masked by later successes. */
    for (k = 0; k < num_kernels; k++)
    {
        vx_kernel kernel = vxGetKernelByName(context, kernels[k]->name);
        /* Keep a second handle: vxReleaseKernel NULLs the pointer passed to it,
         * and the original 'kernel' value is needed again below. */
        vx_kernel kernelcpy = kernel;
        if (kernel)
        {
            status = vxReleaseKernel(&kernelcpy);
            if (status != VX_SUCCESS)
            {
                vxAddLogEntry((vx_reference)context, status, "Failed to release kernel[%u]=%s\n",k, kernels[k]->name);
            }
            else
            {
                /* NOTE(review): 'kernel' was just released above; re-using the
                 * handle here relies on the implementation keeping the kernel
                 * object alive until vxRemoveKernel — confirm against the
                 * framework's reference-counting semantics. */
                kernelcpy = kernel;
                status = vxRemoveKernel(kernelcpy);
                if (status != VX_SUCCESS)
                {
                    vxAddLogEntry((vx_reference)context, status, "Failed to remove kernel[%u]=%s\n",k, kernels[k]->name);
                }
            }
        }
        else
        {
            /* Lookup failed: log with the stale 'status' from the prior iteration. */
            vxAddLogEntry((vx_reference)context, status, "Failed to get added kernel %s\n", kernels[k]->name);
        }
    }
    return status;
}
/*! \brief The entry point into this module to add the base kernels to OpenVX.
 * \param context The handle to the implementation context.
 * \return vx_status Returns errors if some or all kernels were not added
 * correctly.
 * \ingroup group_implementation
 */
/*VX_API_ENTRY*/ vx_status VX_API_CALL vxPublishKernels(vx_context context)
{
    vx_status status = VX_FAILURE;
    vx_uint32 p = 0, k = 0;
    /* For each kernel descriptor: add the kernel, attach all of its parameters,
     * then finalize it. On any parameter failure the half-built kernel is
     * removed. 'status' after the loop reflects only the LAST kernel. */
    for (k = 0; k < num_kernels; k++)
    {
        vx_kernel kernel = vxAddKernel(context,
                             kernels[k]->name,
                             kernels[k]->enumeration,
                             kernels[k]->function,
                             kernels[k]->numParams,
                             kernels[k]->input_validate,
                             kernels[k]->output_validate,
                             kernels[k]->initialize,
                             kernels[k]->deinitialize);
        if (kernel)
        {
            status = VX_SUCCESS; // temporary
            for (p = 0; p < kernels[k]->numParams; p++)
            {
                status = vxAddParameterToKernel(kernel, p,
                                                kernels[k]->parameters[p].direction,
                                                kernels[k]->parameters[p].data_type,
                                                kernels[k]->parameters[p].state);
                if (status != VX_SUCCESS)
                {
                    vxAddLogEntry((vx_reference)context, status, "Failed to add parameter %d to kernel %s! (%d)\n", p, kernels[k]->name, status);
                    break;
                }
            }
            if (status == VX_SUCCESS)
            {
                /* All parameters attached: make the kernel usable in graphs. */
                status = vxFinalizeKernel(kernel);
                if (status != VX_SUCCESS)
                {
                    vxAddLogEntry((vx_reference)context, status, "Failed to finalize kernel[%u]=%s\n",k, kernels[k]->name);
                }
            }
            else
            {
                /* Parameter registration failed: tear the kernel back out.
                 * Note this overwrites the original failure status. */
                status = vxRemoveKernel(kernel);
                if (status != VX_SUCCESS)
                {
                    vxAddLogEntry((vx_reference)context, status, "Failed to remove kernel[%u]=%s\n",k, kernels[k]->name);
                }
            }
        }
        else
        {
            vxAddLogEntry((vx_reference)context, status, "Failed to add kernel %s\n", kernels[k]->name);
        }
    }
    return status;
}
/*! \brief Kernel callback that verifies every byte of a buffer equals a fill value.
 * \param node       The node being executed (used only as a log-entry anchor).
 * \param parameters [0] the buffer to check, [1] scalar fill byte, [2] scalar error-count output.
 * \param num        Number of parameters; must be 3 or the call is a silent no-op returning VX_SUCCESS.
 * \return VX_SUCCESS when the buffer matched; VX_FAILURE on mismatches or access errors.
 */
static vx_status vxCheckBufferKernel(vx_node node, vx_reference *parameters, vx_uint32 num)
{
    vx_status status = VX_SUCCESS;
    if (num == 3)
    {
        /* FIX: was declared 'vx_scalar buffer' while the cast and every use
         * treat it as a vx_buffer; declaration now matches the actual type. */
        vx_buffer buffer = (vx_buffer)parameters[0];
        vx_scalar fill = (vx_scalar)parameters[1];
        vx_scalar errs = (vx_scalar)parameters[2];
        vx_uint8 value = 0u;
        vx_size numUnits = 0ul, unitSize = 0ul, size = 0ul;
        vx_uint32 errors = 0;
        vx_uint8 *ptr = NULL;
        vxQueryBuffer(buffer, VX_BUFFER_ATTRIBUTE_NUMUNITS, &numUnits, sizeof(numUnits));
        vxQueryBuffer(buffer, VX_BUFFER_ATTRIBUTE_UNITSIZE, &unitSize, sizeof(unitSize));
        vxQueryBuffer(buffer, VX_BUFFER_ATTRIBUTE_SIZE, &size, sizeof(size));
        vxAccessScalarValue(fill, (void *)&value);
        status = vxAccessBufferRange(buffer, 0, numUnits, (void **)&ptr);
        if (status == VX_SUCCESS)
        {
            vx_size i = 0;
            /* NOTE(review): iterates 'size' bytes while the accessed range is
             * [0, numUnits) units — assumes size == numUnits * unitSize; confirm. */
            for (i = 0ul; i < size; i++)
            {
                if (ptr[i] != value)
                {
                    errors++;
                }
            }
            vxCommitScalarValue(errs, &errors);
            if (errors > 0)
            {
                vxAddLogEntry(vxGetContext(node), VX_FAILURE, "Check buffer %p of "VX_FMT_SIZE" bytes with 0x%02x, found %u errors\n", ptr, size, value, errors);
            }
            status = vxCommitBufferRange(buffer, 0, numUnits, ptr);
            if (status != VX_SUCCESS)
            {
                vxAddLogEntry(vxGetContext(node), VX_FAILURE, "Failed to set buffer range for "VX_FMT_REF"\n", buffer);
            }
        }
        else
        {
            vxAddLogEntry(vxGetContext(node), VX_FAILURE, "Failed to get buffer range for "VX_FMT_REF"\n", buffer);
        }
        if (errors > 0)
        {
            status = VX_FAILURE;
        }
    }
    return status;
}
static vx_pyramid vxCreatePyramidInt(vx_context context, vx_size levels, vx_float32 scale, vx_uint32 width, vx_uint32 height, vx_df_image format, vx_bool is_virtual) { vx_pyramid pyramid = NULL; if (vxIsValidContext(context) == vx_false_e) return NULL; if ((scale != VX_SCALE_PYRAMID_HALF) && (scale != VX_SCALE_PYRAMID_ORB)) { VX_PRINT(VX_ZONE_ERROR, "Invalid scale %lf for pyramid!\n",scale); vxAddLogEntry((vx_reference)context, VX_ERROR_INVALID_PARAMETERS, "Invalid scale %lf for pyramid!\n",scale); pyramid = (vx_pyramid_t *)vxGetErrorObject(context, VX_ERROR_INVALID_PARAMETERS); } else if (levels == 0 || levels > 8) { VX_PRINT(VX_ZONE_ERROR, "Invalid number of levels for pyramid!\n", levels); vxAddLogEntry((vx_reference)context, VX_ERROR_INVALID_PARAMETERS, "Invalid number of levels for pyramid!\n", levels); pyramid = (vx_pyramid_t *)vxGetErrorObject(context, VX_ERROR_INVALID_PARAMETERS); } else { pyramid = (vx_pyramid)vxCreateReference(context, VX_TYPE_PYRAMID, VX_EXTERNAL, &context->base); if (pyramid && pyramid->base.type == VX_TYPE_PYRAMID) { vx_status status; pyramid->base.is_virtual = is_virtual; status = vxInitPyramid(pyramid, levels, scale, width, height, format); if (status != VX_SUCCESS) { vxAddLogEntry((vx_reference)pyramid, status, "Failed to initialize pyramid\n"); vxReleasePyramid((vx_pyramid *)&pyramid); pyramid = (vx_pyramid_t *)vxGetErrorObject(context, status); } } else { VX_PRINT(VX_ZONE_ERROR, "Failed to allocate memory\n"); vxAddLogEntry((vx_reference)context, VX_ERROR_NO_MEMORY, "Failed to allocate memory\n"); pyramid = (vx_pyramid_t *)vxGetErrorObject(context, VX_ERROR_NO_MEMORY); } } return pyramid; }
/*! \brief List-sorter comparator ordering two keypoint references by Harris strength.
 * \param a,b References assumed to be vx_keypoint objects.
 * \return VX_COMPARE_LT/EQ/GT on success; VX_COMPARE_UNKNOWN (with a log entry)
 *         when either keypoint cannot be accessed or the strengths do not compare.
 */
vx_enum vxHarrisScoreSorter(vx_reference a, vx_reference b)
{
    vx_enum result = VX_COMPARE_UNKNOWN;
    vx_keypoint kp_a = (vx_keypoint)a;
    vx_keypoint kp_b = (vx_keypoint)b;
    vx_keypoint_t data_a, *pdata_a = &data_a;
    vx_keypoint_t data_b, *pdata_b = &data_b;
    vx_bool accessible = (vx_bool)((vxAccessKeypoint(kp_a, &pdata_a) == VX_SUCCESS) &&
                                   (vxAccessKeypoint(kp_b, &pdata_b) == VX_SUCCESS));
    if (accessible)
    {
        /* Three explicit comparisons (rather than a plain else) so that
         * non-comparable strengths still yield VX_COMPARE_UNKNOWN. */
        if (data_a.strength == data_b.strength)
            result = VX_COMPARE_EQ;
        else if (data_a.strength < data_b.strength)
            result = VX_COMPARE_LT;
        else if (data_a.strength > data_b.strength)
            result = VX_COMPARE_GT;
        vxCommitKeypoint(kp_a, pdata_a);
        vxCommitKeypoint(kp_b, pdata_b);
    }
    if (result == VX_COMPARE_UNKNOWN)
    {
        vxAddLogEntry(a, VX_ERROR_INVALID_REFERENCE, "FATAL: References given to list sorter are not desired type!\n");
    }
    return result;
}
/*! \brief Creates a 1D distribution (histogram) object of numBins bins covering
 * [offset, offset+range). Returns an error object on bad parameters; may return
 * NULL when the context is invalid. */
VX_API_ENTRY vx_distribution VX_API_CALL vxCreateDistribution(vx_context context, vx_size numBins, vx_int32 offset, vx_uint32 range)
{
    vx_distribution distribution = NULL;
    if (vxIsValidContext(context) == vx_true_e)
    {
        if ((numBins != 0) && (range != 0))
        {
            distribution = (vx_distribution)vxCreateReference(context, VX_TYPE_DISTRIBUTION, VX_EXTERNAL, &context->base);
            if ( vxGetStatus((vx_reference)distribution) == VX_SUCCESS && distribution->base.type == VX_TYPE_DISTRIBUTION)
            {
                /* Backing memory is described as a 2D, single-plane layout:
                 * one vx_int32 per bin, numBins wide, one row. */
                distribution->memory.ndims = 2;
                distribution->memory.nptrs = 1;
                distribution->memory.strides[0][VX_DIM_C] = sizeof(vx_int32);
                distribution->memory.dims[0][VX_DIM_C] = 1;
                distribution->memory.dims[0][VX_DIM_X] = (vx_int32)numBins;
                distribution->memory.dims[0][VX_DIM_Y] = 1;
                distribution->memory.cl_type = CL_MEM_OBJECT_BUFFER;
                /* Integer division: each bin covers range/numBins input values.
                 * NOTE(review): a range not divisible by numBins silently
                 * truncates the window width — confirm intended. */
                distribution->window_x = (vx_uint32)range/(vx_uint32)numBins;
                distribution->window_y = 1;
                distribution->offset_x = offset;
                distribution->offset_y = 0;
            }
        }
        else
        {
            VX_PRINT(VX_ZONE_ERROR, "Invalid parameters to distribution\n");
            vxAddLogEntry(&context->base, VX_ERROR_INVALID_PARAMETERS, "Invalid parameters to distribution\n");
            distribution = (vx_distribution)vxGetErrorObject(context, VX_ERROR_INVALID_PARAMETERS);
        }
    }
    return distribution;
}
/*! \brief Returns a parameter object describing slot 'index' of a kernel's signature.
 * The parameter holds an internal reference on the kernel; node is left NULL.
 * \return The parameter, an error object for an out-of-range index, or NULL when
 *         the kernel reference itself is invalid.
 */
VX_API_ENTRY vx_parameter VX_API_CALL vxGetKernelParameterByIndex(vx_kernel kernel, vx_uint32 index)
{
    vx_parameter parameter = NULL;
    if (vxIsValidSpecificReference(&kernel->base, VX_TYPE_KERNEL) != vx_true_e)
    {
        return NULL;
    }
    if ((index >= VX_INT_MAX_PARAMS) || (index >= kernel->signature.num_parameters))
    {
        vxAddLogEntry(&kernel->base, VX_ERROR_INVALID_PARAMETERS,
                      "Index %u out of range for node %s (numparams = %u)!\n",
                      index, kernel->name, kernel->signature.num_parameters);
        return (vx_parameter_t *)vxGetErrorObject(kernel->base.context, VX_ERROR_INVALID_PARAMETERS);
    }
    parameter = (vx_parameter)vxCreateReference(kernel->base.context, VX_TYPE_PARAMETER, VX_EXTERNAL, &kernel->base.context->base);
    if ((parameter != NULL) && (parameter->base.type == VX_TYPE_PARAMETER))
    {
        parameter->index = index;
        parameter->node = NULL; /* kernel-scoped parameter: no node yet */
        parameter->kernel = kernel;
        /* Hold the kernel alive for the lifetime of the parameter object. */
        vxIncrementReference(&parameter->kernel->base, VX_INTERNAL);
    }
    return parameter;
}
/*! \brief Creates a look-up table, internally backed by an array object.
 * Under OPENVX_STRICT_1_0 only 256-entry VX_TYPE_UINT8 LUTs are permitted;
 * otherwise VX_TYPE_UINT16 is also accepted. Returns an error object for
 * invalid type/count; may return NULL when the context is invalid. */
VX_API_ENTRY vx_lut VX_API_CALL vxCreateLUT(vx_context context, vx_enum data_type, vx_size count)
{
    vx_lut_t *lut = NULL;
    if (vxIsValidContext(context) == vx_true_e)
    {
        if (data_type == VX_TYPE_UINT8)
        {
#if defined(OPENVX_STRICT_1_0)
            /* OpenVX 1.0 strict mode: U8 LUTs must have exactly 256 entries. */
            if (count != 256)
            {
                VX_PRINT(VX_ZONE_ERROR, "Invalid parameter to LUT\n");
                vxAddLogEntry(&context->base, VX_ERROR_INVALID_PARAMETERS, "Invalid parameter to LUT\n");
                lut = (vx_lut_t *)vxGetErrorObject(context, VX_ERROR_INVALID_PARAMETERS);
            }
            else
#endif
            {
                lut = (vx_lut_t *)vxCreateArrayInt(context, VX_TYPE_UINT8, count, vx_false_e, VX_TYPE_LUT);
                if (vxGetStatus((vx_reference)lut) == VX_SUCCESS && lut->base.type == VX_TYPE_LUT)
                {
                    /* LUTs are created "full": all entries count as items. */
                    lut->num_items = count;
                    vxPrintArray(lut);
                }
            }
        }
#if !defined(OPENVX_STRICT_1_0)
        else if (data_type == VX_TYPE_UINT16)
        {
            lut = (vx_lut_t *)vxCreateArrayInt(context, VX_TYPE_UINT16, count, vx_false_e, VX_TYPE_LUT);
            if (vxGetStatus((vx_reference)lut) == VX_SUCCESS && lut->base.type == VX_TYPE_LUT)
            {
                lut->num_items = count;
                vxPrintArray(lut);
            }
        }
#endif
        else
        {
            VX_PRINT(VX_ZONE_ERROR, "Invalid data type\n");
            vxAddLogEntry(&context->base, VX_ERROR_INVALID_TYPE, "Invalid data type\n");
            lut = (vx_lut_t *)vxGetErrorObject(context, VX_ERROR_INVALID_TYPE);
        }
    }
    return (vx_lut)lut;
}
/*! \brief Helper that creates a node from a kernel enum and binds a parameter list.
 * \param graph      The graph to create the node in.
 * \param kernelenum The kernel enumeration to look up.
 * \param params     Array of (direction, reference) pairs, one per kernel parameter.
 * \param num        Number of entries in params.
 * \return The node, or 0 on failure (kernel not found, node creation failed, or
 *         any parameter rejected — in which case the node is released again).
 */
vx_node vxCreateNodeByStructure(vx_graph graph,
                                vx_enum kernelenum,
                                vx_parameter_item_t *params,
                                vx_uint32 num)
{
    vx_status status = VX_SUCCESS;
    vx_node node = 0;
    /* FIX: graph is passed where a vx_reference is expected; the explicit casts
     * below match the convention used throughout the rest of this file and
     * avoid incompatible-pointer conversions. */
    vx_context context = vxGetContext((vx_reference)graph);
    vx_kernel kernel = vxGetKernelByEnum(context, kernelenum);
    if (kernel)
    {
        node = vxCreateNode(graph, kernel);
        if (node)
        {
            vx_uint32 p = 0;
            for (p = 0; p < num; p++)
            {
                status = vxSetParameterByIndex(node, p,
                                               params[p].direction,
                                               params[p].reference);
                if (status != VX_SUCCESS)
                {
                    vxAddLogEntry((vx_reference)graph, status, "Kernel %d Parameter %u is invalid.\n", kernelenum, p);
                    /* Any bad parameter invalidates the whole node. */
                    vxReleaseNode(&node);
                    node = 0;
                    break;
                }
            }
        }
        else
        {
            vxAddLogEntry((vx_reference)graph, VX_ERROR_INVALID_PARAMETERS, "Failed to create node with kernel enum %d\n", kernelenum);
            status = VX_ERROR_NO_MEMORY;
        }
        /* The node holds its own kernel reference; drop the lookup reference. */
        vxReleaseKernel(&kernel);
    }
    else
    {
        vxAddLogEntry((vx_reference)graph, VX_ERROR_INVALID_PARAMETERS, "failed to retrieve kernel enum %d\n", kernelenum);
        status = VX_ERROR_NOT_SUPPORTED;
    }
    return node;
}
/*! \brief Returns a parameter object describing slot 'index' of a node.
 * The parameter takes internal references on both the node and its kernel.
 * Returns an error object for a missing kernel or out-of-range index; NULL when
 * the node reference itself is invalid. */
VX_API_ENTRY vx_parameter VX_API_CALL vxGetParameterByIndex(vx_node node, vx_uint32 index)
{
    vx_parameter param = NULL;
    if (vxIsValidSpecificReference(&node->base, VX_TYPE_NODE) == vx_false_e)
    {
        return param;
    }
    if (node->kernel == NULL)
    {
        /* this can probably never happen */
        vxAddLogEntry(&node->base, VX_ERROR_INVALID_NODE, "Node was created without a kernel! Fatal Error!\n");
        param = (vx_parameter_t *)vxGetErrorObject(node->base.context, VX_ERROR_INVALID_NODE);
    }
    else
    {
        if (/*0 <= index &&*/ index < VX_INT_MAX_PARAMS && index < node->kernel->signature.num_parameters)
        {
            param = (vx_parameter)vxCreateReference(node->base.context, VX_TYPE_PARAMETER, VX_EXTERNAL, &node->base);
            if (param && param->base.type == VX_TYPE_PARAMETER)
            {
                param->index = index;
                /* Pin both the node and the kernel for the parameter's lifetime. */
                param->node = node;
                vxIncrementReference(&param->node->base, VX_INTERNAL);
                param->kernel = node->kernel;
                vxIncrementReference(&param->kernel->base, VX_INTERNAL);
                // if (node->parameters[index])
                //     vxIncrementReference(node->parameters[index], VX_INTERNAL);
            }
        }
        else
        {
            vxAddLogEntry(&node->base, VX_ERROR_INVALID_PARAMETERS, "Index %u out of range for node %s (numparams = %u)!\n", index, node->kernel->name, node->kernel->signature.num_parameters);
            param = (vx_parameter_t *)vxGetErrorObject(node->base.context, VX_ERROR_INVALID_PARAMETERS);
        }
    }
    VX_PRINT(VX_ZONE_API, "%s: returning %p\n", __FUNCTION__, param);
    return param;
}
//////// // User kernels needs to be registered with every OpenVX context before use in a graph. // // TODO STEP 04:******** // 1. Use vxAddUserKernel API to register "app.userkernels.tensor_cos" with // kernel enumeration = USER_KERNEL_TENSOR_COS, numParams = 2, and // all of the user kernel callback functions you implemented above. // 2. Use vxAddParameterToKernel API to specify direction, data_type, and // state of all 2 parameters to the kernel. Look into the comments of // userTensorCosNode function (above) to details about the order of // kernel parameters and their types. // 3. Use vxFinalizeKernel API to make the kernel ready to use in a graph. // Note that the kernel object is still valid after this call. // So you need to call vxReleaseKernel before returning from this function. vx_status registerUserKernel( vx_context context ) { // vx_kernel kernel = vxAddUserKernel( context, // "app.userkernels.tensor_cos", // /* Fill in parameters */ ); // ERROR_CHECK_OBJECT( kernel ); // ERROR_CHECK_STATUS( vxAddParameterToKernel( kernel, 0, VX_INPUT, VX_TYPE_TENSOR, VX_PARAMETER_STATE_REQUIRED ) ); // input // ERROR_CHECK_STATUS( vxAddParameterToKernel( kernel, 1, VX_OUTPUT, /* Fill in parameters */ ) ); // output // ERROR_CHECK_STATUS( vxFinalizeKernel( kernel ) ); // ERROR_CHECK_STATUS( vxReleaseKernel( &kernel ) ); vxAddLogEntry( ( vx_reference ) context, VX_SUCCESS, "OK: registered user kernel app.userkernels.tensor_cos\n" ); return VX_SUCCESS; }
/*! \brief Looks up a kernel by enumeration, scanning targets in priority order.
 * On a hit the kernel's affinity is set to the matching target and its
 * reference count is incremented. Returns NULL when not found or the context
 * or enumeration is invalid. */
vx_kernel vxGetKernelByEnum(vx_context c, vx_enum kernelenum)
{
    vx_kernel_t *kern = NULL;
    vx_context_t *context = (vx_context_t *)c;
    vxPrintReference(&context->base);
    if (vxIsValidContext(context) == vx_true_e)
    {
        if (VX_KERNEL_INVALID >= kernelenum)
        {
            vxAddLogEntry(c, VX_ERROR_INVALID_PARAMETERS, "Invalid kernel enumeration (%d)\n", kernelenum);
        }
        /* NOTE(review): this condition is the exact complement of the branch
         * above, so it is always true when reached. */
        else if (kernelenum > VX_KERNEL_INVALID) // no upper bound for kernel enum
        {
            vx_uint32 k = 0u, t = 0u;
            VX_PRINT(VX_ZONE_KERNEL,"Scanning for kernel enum %d out of %d kernels\n", kernelenum, context->numKernels);
            /* Outer loop walks targets highest-priority first so the first
             * match wins target affinity. */
            for (t = 0; t < context->numTargets; t++)
            {
                vx_target_t *target = &context->targets[context->priority_targets[t]];
                VX_PRINT(VX_ZONE_KERNEL, "Checking Target[%u]=%s for %u kernels\n", context->priority_targets[t], target->name, target->numKernels);
                for (k = 0; k < target->numKernels; k++)
                {
                    vx_kernel_t *kernel = &target->kernels[k];
                    if (kernel->enumeration == kernelenum)
                    {
                        kernel->affinity = context->priority_targets[t];
                        kern = kernel;
                        /* NOTE(review): single-argument form, unlike the
                         * two-argument vxIncrementReference(ref, VX_*) calls
                         * elsewhere in this file — confirm API version. */
                        vxIncrementReference(&kern->base);
                        VX_PRINT(VX_ZONE_KERNEL,"Found Kernel[%u] enum:%d name:%s in target[%u]=%s\n", k, kernelenum, kern->name, context->priority_targets[t], target->name);
                        break;
                    }
                    kernel = NULL;
                }
                if (kern != NULL)
                    break;
            }
        }
    }
    else
    {
        VX_PRINT(VX_ZONE_ERROR, "Invalid context %p\n", context);
    }
    return (vx_kernel)kern;
}
//////// // User kernels needs to be registered with every OpenVX context before use in a graph. // // TODO:******** // 1. Use vxAddUserKernel API to register "app.userkernels.tensor_cos" with // kernel enumeration = USER_KERNEL_TENSOR_COS, numParams = 2, and // all of the user kernel callback functions you implemented above. // 2. Use vxAddParameterToKernel API to specify direction, data_type, and // state of all 2 parameters to the kernel. Look into the comments of // userTensorCosNode function (above) to details about the order of // kernel parameters and their types. // 3. Use vxFinalizeKernel API to make the kernel ready to use in a graph. // Note that the kernel object is still valid after this call. // So you need to call vxReleaseKernel before returning from this function. vx_status registerUserKernel( vx_context context ) { vx_kernel kernel = vxAddUserKernel( context, "app.userkernels.tensor_cos", USER_KERNEL_TENSOR_COS, tensor_cos_host_side_function, 2, // numParams tensor_cos_validator, NULL, NULL ); ERROR_CHECK_OBJECT( kernel ); ERROR_CHECK_STATUS( vxAddParameterToKernel( kernel, 0, VX_INPUT, VX_TYPE_TENSOR, VX_PARAMETER_STATE_REQUIRED ) ); // input ERROR_CHECK_STATUS( vxAddParameterToKernel( kernel, 1, VX_OUTPUT, VX_TYPE_TENSOR, VX_PARAMETER_STATE_REQUIRED ) ); // output ERROR_CHECK_STATUS( vxFinalizeKernel( kernel ) ); ERROR_CHECK_STATUS( vxReleaseKernel( &kernel ) ); vxAddLogEntry( ( vx_reference ) context, VX_SUCCESS, "OK: registered user kernel app.userkernels.tensor_cos\n" ); return VX_SUCCESS; }
/*! \brief Returns reference number 'index' from an import object, adding an
 * external reference on it. Returns an error object for an out-of-range index
 * and NULL for an invalid import. */
VX_API_ENTRY vx_reference VX_API_CALL vxGetReferenceByIndex(vx_import import, vx_uint32 index)
{
    vx_reference found = NULL;
    vx_bool valid_import = (vx_bool)(import && import->base.type == VX_TYPE_IMPORT);
    if (!valid_import)
    {
        VX_PRINT(VX_ZONE_ERROR, "Invalid import reference!\n");
        return NULL;
    }
    if (index >= import->count)
    {
        VX_PRINT(VX_ZONE_ERROR, "Incorrect index value\n");
        vxAddLogEntry(&import->base.context->base, VX_ERROR_INVALID_PARAMETERS, "Incorrect index value\n");
        return (vx_reference_t *)vxGetErrorObject(import->base.context, VX_ERROR_INVALID_PARAMETERS);
    }
    found = (vx_reference_t *)import->refs[index];
    /* The caller receives its own external reference to release. */
    vxIncrementReference(found, VX_EXTERNAL);
    return found;
}
/*! \brief Creates a scalar of the given primitive type, optionally initialized
 * from *ptr via vxCommitScalarValue. Returns an error object for a non-scalar
 * type and 0 for an invalid context. */
VX_API_ENTRY vx_scalar VX_API_CALL vxCreateScalar(vx_context context, vx_enum data_type, void *ptr)
{
    vx_scalar result = NULL;
    if (vxIsValidContext(context) == vx_false_e)
    {
        return 0;
    }
    if (VX_TYPE_IS_SCALAR(data_type))
    {
        result = (vx_scalar)vxCreateReference(context, VX_TYPE_SCALAR, VX_EXTERNAL, &context->base);
        if ((result != NULL) && (result->base.type == VX_TYPE_SCALAR))
        {
            result->data_type = data_type;
            /* Seed the initial value from the caller's pointer. */
            vxCommitScalarValue(result, ptr);
        }
    }
    else
    {
        VX_PRINT(VX_ZONE_ERROR, "Invalid type to scalar\n");
        vxAddLogEntry(&context->base, VX_ERROR_INVALID_TYPE, "Invalid type to scalar\n");
        result = (vx_scalar)vxGetErrorObject(context, VX_ERROR_INVALID_TYPE);
    }
    return (vx_scalar)result;
}
/*! \brief Fills a 2x3 VX_TYPE_FLOAT32 matrix with a rotation of 'angle' degrees
 * scaled by 'scale' about the point (center_x, center_y).
 * \param matrix Must be 2 columns x 3 rows, VX_TYPE_FLOAT32.
 * \return VX_SUCCESS on success; VX_FAILURE (with a log entry) on a
 *         type/dimension mismatch or access failure.
 */
vx_status vxSetAffineRotationMatrix(vx_matrix matrix,
                                    vx_float32 angle,
                                    vx_float32 scale,
                                    vx_float32 center_x,
                                    vx_float32 center_y)
{
    vx_status status = VX_FAILURE;
    vx_float32 mat[3][2];
    vx_size columns = 0ul, rows = 0ul;
    vx_enum type = 0;
    vxQueryMatrix(matrix, VX_MATRIX_ATTRIBUTE_COLUMNS, &columns, sizeof(columns));
    vxQueryMatrix(matrix, VX_MATRIX_ATTRIBUTE_ROWS, &rows, sizeof(rows));
    vxQueryMatrix(matrix, VX_MATRIX_ATTRIBUTE_TYPE, &type, sizeof(type));
    if ((columns == 2) && (rows == 3) && (type == VX_TYPE_FLOAT32))
    {
        status = vxAccessMatrix(matrix, mat);
        if (status == VX_SUCCESS)
        {
            vx_float32 radians = (angle / 360.0f) * (VX_TAU);
            /* cosf/sinf: stay in single precision to match vx_float32. */
            vx_float32 a = scale * cosf(radians);
            vx_float32 b = scale * sinf(radians);
            mat[0][0] = a;
            mat[1][0] = b;
            mat[2][0] = ((1.0f - a) * center_x) - (b * center_y);
            mat[0][1] = -b;
            mat[1][1] = a;
            /* FIX: translation term used center_y in both factors; the affine
             * rotation-about-a-point formula requires b*center_x here
             * (cf. OpenCV getRotationMatrix2D). */
            mat[2][1] = (b * center_x) + ((1.0f - a) * center_y);
            status = vxCommitMatrix(matrix, mat);
        }
    }
    else
    {
        vxAddLogEntry(matrix, status, "Failed to set affine matrix due to type or dimension mismatch!\n");
    }
    return status;
}
/*! \brief Reads an image from the file named in 'file' into 'output'.
 * Supports .pgm/.PGM (header parsed for dimensions) and raw .yuv/.rgb/.bw
 * (dimensions parsed from a "name_WxH_..." filename pattern). Each plane is
 * filled via access/commit of an image patch. Returns VX_FAILURE on any
 * filename, open, or read error. */
vx_status vxFReadImage(vx_array file, vx_image output)
{
    vx_char *filename = NULL;
    vx_size filename_stride = 0;
    vx_uint8 *src = NULL;
    vx_uint32 p = 0u, y = 0u;
    vx_size planes = 0u;
    vx_imagepatch_addressing_t addr = {0};
    vx_df_image format = VX_DF_IMAGE_VIRT;
    FILE *fp = NULL;
    vx_char tmp[VX_MAX_FILE_NAME] = {0};
    vx_char *ext = NULL;
    vx_rectangle_t rect;
    vx_uint32 width = 0, height = 0;
    /* The filename is carried in a vx_char array object; map it read-only. */
    vx_status status = vxAccessArrayRange(file, 0, VX_MAX_FILE_NAME, &filename_stride, (void **)&filename, VX_READ_ONLY);
    if (status != VX_SUCCESS || filename_stride != sizeof(vx_char))
    {
        vxAddLogEntry((vx_reference)file, VX_FAILURE, "Incorrect array "VX_FMT_REF"\n", file);
        return VX_FAILURE;
    }
    fp = fopen(filename, "rb");
    if (fp == NULL)
    {
        /* NOTE(review): the array range mapped above is not committed back on
         * this early return — confirm whether that leaks an accessor. */
        vxAddLogEntry((vx_reference)file, VX_FAILURE, "Failed to open file %s\n",filename);
        return VX_FAILURE;
    }
    vxQueryImage(output, VX_IMAGE_PLANES, &planes, sizeof(planes));
    vxQueryImage(output, VX_IMAGE_FORMAT, &format, sizeof(format));
    ext = strrchr(filename, '.');
    if (ext && (strcmp(ext, ".pgm") == 0 || strcmp(ext, ".PGM") == 0))
    {
        /* PGM header: magic, comment, "W H", max value. NOTE(review): assumes
         * exactly one comment line is present — a PGM without a comment (or
         * with several) would be mis-parsed; verify against expected inputs. */
        FGETS(tmp, fp); // PX
        FGETS(tmp, fp); // comment
        FGETS(tmp, fp); // W H
        sscanf(tmp, "%u %u", &width, &height);
        FGETS(tmp, fp); // BPP
        // ! \todo double check image size?
    }
    else if (ext && (strcmp(ext, ".yuv") == 0 ||
                     strcmp(ext, ".rgb") == 0 ||
                     strcmp(ext, ".bw") == 0))
    {
        /* Raw formats carry their dimensions in the filename: "..._WxH_...". */
        sscanf(filename, "%*[^_]_%ux%u_%*s", &width, &height);
    }
    rect.start_x = rect.start_y = 0;
    rect.end_x = width;
    rect.end_y = height;
    for (p = 0; p < planes; p++)
    {
        status = vxAccessImagePatch(output, &rect, p, &addr, (void **)&src, VX_WRITE_ONLY);
        if (status == VX_SUCCESS)
        {
            /* Read one (possibly sub-sampled) row at a time into the patch. */
            for (y = 0; y < addr.dim_y; y+=addr.step_y)
            {
                vx_uint8 *srcp = vxFormatImagePatchAddress2d(src, 0, y, &addr);
                vx_size len = ((addr.dim_x * addr.scale_x)/VX_SCALE_UNITY);
                vx_size rlen = fread(srcp, addr.stride_x, len, fp);
                if (rlen != len)
                {
                    status = VX_FAILURE;
                    break;
                }
            }
            if (status == VX_SUCCESS)
            {
                status = vxCommitImagePatch(output, &rect, p, &addr, src);
            }
            if (status != VX_SUCCESS)
            {
                break;
            }
        }
        /* src pointer should be made NULL , otherwise the first plane data
           gets over written. */
        src = NULL;
    }
    fclose(fp);
    vxCommitArrayRange(file, 0, 0, filename);
    return status;
}
vx_status vxAccessArrayRangeInt(vx_array arr, vx_size start, vx_size end, vx_size *pStride, void **ptr, vx_enum usage) { vx_status status = VX_FAILURE; /* bad parameters */ if ((usage < VX_READ_ONLY) || (VX_READ_AND_WRITE < usage) || (ptr == NULL) || (start >= end) || (end > arr->num_items)) { return VX_ERROR_INVALID_PARAMETERS; } /* determine if virtual before checking for memory */ if (arr->base.is_virtual == vx_true_e) { if (arr->base.is_accessible == vx_false_e) { /* User tried to access a "virtual" array. */ VX_PRINT(VX_ZONE_ERROR, "Can not access a virtual array\n"); return VX_ERROR_OPTIMIZED_AWAY; } /* framework trying to access a virtual image, this is ok. */ } /* verify has not run or will not run yet. this allows this API to "touch" * the array to create it. */ if (vxAllocateArray(arr) == vx_false_e) { return VX_ERROR_NO_MEMORY; } /* POSSIBILITIES: * 1.) !*ptr && RO == COPY-ON-READ (make ptr=alloc) * 2.) !*ptr && WO == MAP * 3.) !*ptr && RW == MAP * 4.) *ptr && RO||RW == COPY (UNLESS MAP) */ /* MAP mode */ if (*ptr == NULL) { if ((usage == VX_WRITE_ONLY) || (usage == VX_READ_AND_WRITE)) { /*-- MAP --*/ status = VX_ERROR_NO_RESOURCES; /* lock the memory */ if(vxSemWait(&arr->memory.locks[0]) == vx_true_e) { vx_size offset = start * arr->item_size; *ptr = &arr->memory.ptrs[0][offset]; if (usage != VX_WRITE_ONLY) { vxReadFromReference(&arr->base); } vxIncrementReference(&arr->base, VX_EXTERNAL); status = VX_SUCCESS; } } else { /*-- COPY-ON-READ --*/ vx_size size = ((end - start) * arr->item_size); vx_uint32 a = 0u; vx_size *stride_save = calloc(1, sizeof(vx_size)); *stride_save = arr->item_size; if (vxAddAccessor(arr->base.context, size, usage, *ptr, &arr->base, &a, stride_save) == vx_true_e) { vx_size offset; *ptr = arr->base.context->accessors[a].ptr; offset = start * arr->item_size; memcpy(*ptr, &arr->memory.ptrs[0][offset], size); vxReadFromReference(&arr->base); vxIncrementReference(&arr->base, VX_EXTERNAL); status = VX_SUCCESS; } else { status = 
VX_ERROR_NO_MEMORY; vxAddLogEntry((vx_reference)arr, status, "Failed to allocate memory for COPY-ON-READ! Size="VX_FMT_SIZE"\n", size); } } if ((status == VX_SUCCESS) && (pStride != NULL)) { *pStride = arr->item_size; } } /* COPY mode */ else { vx_size size = ((end - start) * arr->item_size); vx_uint32 a = 0u; vx_size *stride_save = calloc(1, sizeof(vx_size)); if (pStride == NULL) { *stride_save = arr->item_size; pStride = stride_save; } else { *stride_save = *pStride; } if (vxAddAccessor(arr->base.context, size, usage, *ptr, &arr->base, &a, stride_save) == vx_true_e) { *ptr = arr->base.context->accessors[a].ptr; status = VX_SUCCESS; if ((usage == VX_WRITE_ONLY) || (usage == VX_READ_AND_WRITE)) { if (vxSemWait(&arr->memory.locks[0]) == vx_false_e) { status = VX_ERROR_NO_RESOURCES; } } if (status == VX_SUCCESS) { if (usage != VX_WRITE_ONLY) { int i; vx_uint8 *pSrc, *pDest; for (i = start, pDest = *ptr, pSrc = &arr->memory.ptrs[0][start * arr->item_size]; i < end; i++, pDest += *pStride, pSrc += arr->item_size) { memcpy(pDest, pSrc, arr->item_size); } vxReadFromReference(&arr->base); } vxIncrementReference(&arr->base, VX_EXTERNAL); } } else { status = VX_ERROR_NO_MEMORY; vxAddLogEntry((vx_reference)arr, status, "Failed to allocate memory for COPY-ON-READ! Size="VX_FMT_SIZE"\n", size); } } return status; }
/*! \brief Output validator for the ChannelCombine kernel (output is index 4).
 * Cross-checks the four optional input planes for consistent dimensions,
 * derives the chroma sub-sampling factors (uv_x_scale/uv_y_scale) from the
 * first differing plane, then verifies the output format's plane requirements
 * before publishing the output meta-format. */
static vx_status VX_CALLBACK vxChannelCombineOutputValidator(vx_node node, vx_uint32 index, vx_meta_format_t *ptr)
{
    vx_status status = VX_ERROR_INVALID_PARAMETERS;
    if (index == 4)
    {
        vx_uint32 p, width = 0, height = 0;
        /* 0 = not yet determined; 1 or 2 once derived from plane sizes. */
        vx_uint32 uv_x_scale = 0, uv_y_scale = 0;
        vx_parameter params[] = {
            vxGetParameterByIndex(node, 0),
            vxGetParameterByIndex(node, 1),
            vxGetParameterByIndex(node, 2),
            vxGetParameterByIndex(node, 3),
            vxGetParameterByIndex(node, index)
        };
        vx_bool planes_present[4] = { vx_false_e, vx_false_e, vx_false_e, vx_false_e };
        /* check for equal plane sizes and determine plane presence */
        for (p = 0; p < index; p++)
        {
            if (params[p])
            {
                vx_image image = 0;
                vxQueryParameter(params[p], VX_PARAMETER_ATTRIBUTE_REF, &image, sizeof(image));
                planes_present[p] = image != 0;
                if (image)
                {
                    uint32_t w = 0, h = 0;
                    vxQueryImage(image, VX_IMAGE_ATTRIBUTE_WIDTH, &w, sizeof(w));
                    vxQueryImage(image, VX_IMAGE_ATTRIBUTE_HEIGHT, &h, sizeof(h));
                    if (width == 0 && height == 0)
                    {
                        /* First plane seen establishes the full-resolution size. */
                        width = w;
                        height = h;
                    }
                    else if (uv_x_scale == 0 && uv_y_scale == 0)
                    {
                        /* Second distinct plane fixes the sub-sampling: equal
                         * size -> 1, exactly half -> 2, anything else -> 0
                         * (invalid). uv_y_scale may never exceed uv_x_scale. */
                        uv_x_scale = width == w ? 1 : (width == 2*w ? 2 : 0);
                        uv_y_scale = height == h ? 1 : (height == 2*h ? 2 : 0);
                        if (uv_x_scale == 0 ||
                            uv_y_scale == 0 ||
                            uv_y_scale > uv_x_scale)
                        {
                            status = VX_ERROR_INVALID_DIMENSION;
                            vxAddLogEntry((vx_reference)image, status, "Input image channel %u does not match in dimensions!\n", p);
                            /* NOTE(review): 'image' is not released on the goto
                             * paths, unlike the normal path below — confirm. */
                            goto exit;
                        }
                    }
                    else if (width != w * uv_x_scale ||
                             height != h * uv_y_scale)
                    {
                        status = VX_ERROR_INVALID_DIMENSION;
                        vxAddLogEntry((vx_reference)image, status, "Input image channel %u does not match in dimensions!\n", p);
                        goto exit;
                    }
                    vxReleaseImage(&image);
                }
            }
        }
        if (params[index])
        {
            vx_image output = 0;
            vxQueryParameter(params[index], VX_PARAMETER_ATTRIBUTE_REF, &output, sizeof(output));
            if (output)
            {
                vx_df_image format = VX_DF_IMAGE_VIRT;
                vx_bool supported_format = vx_true_e;
                /* All formats need at least planes 0..2 present. */
                vx_bool correct_planes = planes_present[0] && planes_present[1] && planes_present[2];
                vxQueryImage(output, VX_IMAGE_ATTRIBUTE_FORMAT, &format, sizeof(format));
                /* Each output format dictates the required plane count and
                 * chroma sub-sampling of the inputs. */
                switch (format)
                {
                    case VX_DF_IMAGE_RGB:
                    case VX_DF_IMAGE_YUV4:
                        correct_planes = correct_planes && uv_y_scale == 1 && uv_x_scale == 1;
                        break;
                    case VX_DF_IMAGE_RGBX:
                        correct_planes = correct_planes && planes_present[3] && uv_y_scale == 1 && uv_x_scale == 1;
                        break;
                    case VX_DF_IMAGE_YUYV:
                    case VX_DF_IMAGE_UYVY:
                        correct_planes = correct_planes && uv_y_scale == 1 && uv_x_scale == 2;
                        break;
                    case VX_DF_IMAGE_NV12:
                    case VX_DF_IMAGE_NV21:
                    case VX_DF_IMAGE_IYUV:
                        correct_planes = correct_planes && uv_y_scale == 2 && uv_x_scale == 2;
                        break;
                    default:
                        supported_format = vx_false_e;
                }
                if (supported_format)
                {
                    if (correct_planes)
                    {
                        ptr->type = VX_TYPE_IMAGE;
                        ptr->dim.image.format = format;
                        ptr->dim.image.width = width;
                        ptr->dim.image.height = height;
                        status = VX_SUCCESS;
                    }
                    else
                    {
                        VX_PRINT(VX_ZONE_API, "Valid format but missing planes!\n");
                    }
                }
                vxReleaseImage(&output);
            }
        }
exit:
        /* Release every parameter handle acquired above, success or failure. */
        for (p = 0; p < dimof(params); p++)
        {
            if (params[p])
            {
                vxReleaseParameter(&params[p]);
            }
        }
    }
    VX_PRINT(VX_ZONE_API, "%s:%u returned %d\n", __FUNCTION__, index, status);
    return status;
}
/*! \brief Kernel callback that verifies every sub-pixel byte of an image equals
 * a packed fill value.
 * \param parameters [0] the image to check, [1] scalar fill value, [2] scalar
 *                   error-count output.
 * \return VX_SUCCESS when the image matched; VX_FAILURE on mismatches or
 *         access errors. A no-op returning VX_SUCCESS when num != 3. */
static vx_status vxCheckImageKernel(vx_node node, vx_reference *parameters, vx_uint32 num)
{
    vx_status status = VX_SUCCESS;
    if (num == 3)
    {
        vx_image image = (vx_image)parameters[0];
        vx_scalar fill = (vx_scalar)parameters[1];
        vx_scalar errs = (vx_scalar)parameters[2];
        packed_value_u value;
        vx_uint32 planes = 0u, count = 0u, errors = 0u;
        vx_uint32 x = 0u, y = 0u, p = 0u;
        vx_int32 i = 0;
        vx_imagepatch_addressing_t addr;
        vx_rectangle rect;
        /* Sentinel pattern; overwritten by the actual fill scalar below. */
        value.dword[0] = 0xDEADBEEF;
        vxAccessScalarValue(fill, &value.dword[0]);
        vxQueryImage(image, VX_IMAGE_ATTRIBUTE_PLANES, &planes, sizeof(planes));
        rect = vxGetValidRegionImage(image);
        for (p = 0u; (p < planes) && (rect); p++)
        {
            void *ptr = NULL;
            status = vxAccessImagePatch(image, rect, p, &addr, &ptr);
            if ((status == VX_SUCCESS) && (ptr))
            {
                /* Compare each pixel byte-by-byte against the corresponding
                 * byte of the packed fill value. */
                for (y = 0; y < addr.dim_y; y+=addr.step_y)
                {
                    for (x = 0; x < addr.dim_x; x+=addr.step_x)
                    {
                        vx_uint8 *pixel = vxFormatImagePatchAddress2d(ptr, x, y, &addr);
                        for (i = 0; i < addr.stride_x; i++)
                        {
                            count++;
                            if (pixel[i] != value.bytes[i])
                            {
                                errors++;
                            }
                        }
                    }
                }
                if (errors > 0)
                {
                    vxAddLogEntry(vxGetContext(node), VX_FAILURE, "Checked %p of %u sub-pixels with 0x%08x with %u errors\n", ptr, count, value.dword, errors);
                }
                vxCommitScalarValue(errs, &errors);
                /* Commit with a 0 rectangle: nothing was modified, so no
                 * write-back of pixel data is requested. */
                status = vxCommitImagePatch(image, 0, p, &addr, ptr);
                if (status != VX_SUCCESS)
                {
                    vxAddLogEntry(vxGetContext(node), VX_FAILURE, "Failed to set image patch for "VX_FMT_REF"\n", image);
                }
            }
            else
            {
                vxAddLogEntry(vxGetContext(node), VX_FAILURE, "Failed to get image patch for "VX_FMT_REF"\n", image);
            }
        }
        vxReleaseRectangle(&rect);
        if (errors > 0)
        {
            status = VX_FAILURE;
        }
    }
    return status;
}
/*! \brief Writes 'input' to the file named in 'file'.
 * For .pgm/.PGM a P5 header is emitted first; all formats then dump each plane
 * row by row, zero-padding rows/columns that fall outside the image's valid
 * region so the output keeps the full width x height geometry. */
vx_status vxFWriteImage(vx_image input, vx_array file)
{
    vx_char *filename = NULL;
    vx_size filename_stride = 0;
    vx_uint8 *src[4] = {NULL, NULL, NULL, NULL};
    vx_uint32 p, y, sx, ex, sy, ey, width, height;
    vx_size planes;
    vx_imagepatch_addressing_t addr[4];
    vx_df_image format;
    FILE *fp = NULL;
    vx_char *ext = NULL;
    size_t wrote = 0ul;
    vx_rectangle_t rect;
    /* The filename is carried in a vx_char array object; map it read-only. */
    vx_status status = vxAccessArrayRange(file, 0, VX_MAX_FILE_NAME, &filename_stride, (void **)&filename, VX_READ_ONLY);
    if (status != VX_SUCCESS || filename_stride != sizeof(vx_char))
    {
        vxCommitArrayRange(file, 0, 0, filename);
        vxAddLogEntry((vx_reference)file, VX_FAILURE, "Incorrect array "VX_FMT_REF"\n", file);
        return VX_FAILURE;
    }
    //VX_PRINT(VX_ZONE_INFO, "filename=%s\n",filename);
    fp = fopen(filename, "wb+");
    if (fp == NULL)
    {
        vxCommitArrayRange(file, 0, 0, filename);
        vxAddLogEntry((vx_reference)file, VX_FAILURE, "Failed to open file %s\n",filename);
        return VX_FAILURE;
    }
    /* Statuses are OR-ed together; any failure leaves a nonzero status. */
    status |= vxQueryImage(input, VX_IMAGE_WIDTH, &width, sizeof(width));
    status |= vxQueryImage(input, VX_IMAGE_HEIGHT, &height, sizeof(height));
    status |= vxQueryImage(input, VX_IMAGE_PLANES, &planes, sizeof(planes));
    status |= vxQueryImage(input, VX_IMAGE_FORMAT, &format, sizeof(format));
    status |= vxGetValidRegionImage(input, &rect);
    sx = rect.start_x;
    sy = rect.start_y;
    ex = rect.end_x;
    ey = rect.end_y;
    ext = strrchr(filename, '.');
    if (ext && (strcmp(ext, ".pgm") == 0 || strcmp(ext, ".PGM") == 0))
    {
        /* Binary PGM header: magic, comment, dimensions, max pixel value. */
        fprintf(fp, "P5\n# %s\n",filename);
        fprintf(fp, "%u %u\n", width, height);
        if (format == VX_DF_IMAGE_U8)
            fprintf(fp, "255\n");
        else if (format == VX_DF_IMAGE_S16)
            fprintf(fp, "65535\n");
        else if (format == VX_DF_IMAGE_U16)
            fprintf(fp, "65535\n");
    }
    /* Map all planes up front, then stream them out. */
    for (p = 0u; p < planes; p++)
    {
        status |= vxAccessImagePatch(input, &rect, p, &addr[p], (void **)&src[p], VX_READ_ONLY);
    }
    for (p = 0u; (p < planes) && (status == VX_SUCCESS); p++)
    {
        size_t len = addr[p].stride_x * (addr[p].dim_x * addr[p].scale_x)/VX_SCALE_UNITY;
        for (y = 0u; y < height; y+=addr[p].step_y)
        {
            vx_uint32 i = 0;
            vx_uint8 *ptr = NULL;
            uint8_t value = 0u;
            /* Rows fully outside the valid region: write a zero row. */
            if (y < sy || y >= ey)
            {
                for (i = 0; i < width; ++i)
                {
                    wrote += fwrite(&value, sizeof(value), 1, fp);
                }
                continue;
            }
            /* Zero-pad left of the valid region, then the valid row data,
             * then zero-pad the right side. */
            for (i = 0; i < sx; ++i)
                wrote += fwrite(&value, sizeof(value), 1, fp);
            ptr = vxFormatImagePatchAddress2d(src[p], 0, y - sy, &addr[p]);
            wrote += fwrite(ptr, 1, len, fp);
            for (i = 0; i < width - ex; ++i)
                wrote += fwrite(&value, sizeof(value), 1, fp);
        }
        if (wrote == 0)
        {
            vxAddLogEntry((vx_reference)file, VX_FAILURE, "Failed to write to file!\n");
            status = VX_FAILURE;
            break;
        }
        if (status == VX_FAILURE)
        {
            vxAddLogEntry((vx_reference)file, VX_FAILURE, "Failed to write image to file correctly\n");
            break;
        }
    }
    /* Commit with a NULL rect: read-only access, no pixel write-back needed. */
    for (p = 0u; p < planes; p++)
    {
        status |= vxCommitImagePatch(input, NULL, p, &addr[p], src[p]);
    }
    if (status != VX_SUCCESS)
    {
        vxAddLogEntry((vx_reference)file, VX_FAILURE, "Failed to write image to file correctly\n");
    }
    fflush(fp);
    fclose(fp);
    if (vxCommitArrayRange(file, 0, 0, filename) != VX_SUCCESS)
    {
        vxAddLogEntry((vx_reference)file, VX_FAILURE, "Failed to release handle to filename array!\n");
    }
    return status;
}
vx_kernel vxAddTilingKernel(vx_context c, vx_char name[VX_MAX_KERNEL_NAME], vx_enum enumeration, vx_tiling_kernel_f func_ptr, vx_uint32 num_params, vx_kernel_input_validate_f input, vx_kernel_output_validate_f output) { vx_context_t *context = (vx_context_t *)c; vx_kernel kernel = 0; vx_uint32 t = 0; vx_size index = 0; vx_target_t *target = NULL; vx_char targetName[VX_MAX_TARGET_NAME]; if (vxIsValidContext(context) == vx_false_e) { VX_PRINT(VX_ZONE_ERROR, "Invalid Context\n"); return (vx_kernel)NULL; } if (func_ptr == NULL || input == NULL || output == NULL || num_params > VX_INT_MAX_PARAMS || num_params == 0 || name == NULL || strncmp(name, "", VX_MAX_KERNEL_NAME) == 0) /* initialize and de-initialize can be NULL */ { VX_PRINT(VX_ZONE_ERROR, "Invalid Parameters!\n"); vxAddLogEntry(c, VX_ERROR_INVALID_PARAMETERS, "Invalid Parameters supplied to vxAddKernel\n"); return (vx_kernel)NULL; } /* find target to assign this to */ index = strnindex(name, ':', VX_MAX_TARGET_NAME); if (index == VX_MAX_TARGET_NAME) { strcpy(targetName,"khronos.c_model"); } else { strncpy(targetName, name, index); } VX_PRINT(VX_ZONE_KERNEL, "Deduced Name as %s\n", targetName); for (t = 0u; t < context->numTargets; t++) { target = &context->targets[t]; if (strncmp(targetName,target->name, VX_MAX_TARGET_NAME) == 0) { break; } target = NULL; } if (target && target->funcs.addtilingkernel) { kernel = target->funcs.addtilingkernel(target, name, enumeration, func_ptr, num_params, input, output); VX_PRINT(VX_ZONE_KERNEL,"Added Kernel %s to Target %s ("VX_FMT_REF")\n", name, target->name, kernel); } else { vxAddLogEntry(c, VX_ERROR_NO_RESOURCES, "No target named %s exists!\n", targetName); } return (vx_kernel)kernel; }