/* Commits (writes back) externally-held distribution data and ends the access
 * started by vxAccessDistribution.
 * \param distribution The distribution object being committed.
 * \param ptr          The external buffer previously handed out or filled by
 *                     the caller; may be the internal pointer itself.
 * \return VX_SUCCESS on commit, VX_FAILURE if the reference is invalid or
 *         backing memory could not be allocated.
 * Note: a NULL ptr still drops the external reference taken at access time. */
VX_API_ENTRY vx_status VX_API_CALL vxCommitDistribution(vx_distribution distribution, const void *ptr)
{
    vx_status status = VX_FAILURE;
    /* Validate the reference and make sure backing memory exists before touching it. */
    if ((vxIsValidSpecificReference(&distribution->base, VX_TYPE_DISTRIBUTION) == vx_true_e) &&
        (vxAllocateMemory(distribution->base.context, &distribution->memory) == vx_true_e))
    {
        if (ptr != NULL)
        {
            vxSemWait(&distribution->base.lock);
            {
                /* Only copy when the caller used their own buffer; if ptr is
                 * the internal pointer the data is already in place. */
                if (ptr != distribution->memory.ptrs[0])
                {
                    vx_size size = vxComputeMemorySize(&distribution->memory, 0);
                    memcpy(distribution->memory.ptrs[0], ptr, size);
                    VX_PRINT(VX_ZONE_INFO, "Copied distribution from %p to %p for "VX_FMT_SIZE" bytes\n", ptr, distribution->memory.ptrs[0], size);
                }
            }
            vxSemPost(&distribution->base.lock);
            /* Mark the object as written so downstream consumers re-read it. */
            vxWroteToReference(&distribution->base);
        }
        /* Balance the external refcount incremented by vxAccessDistribution. */
        vxDecrementReference(&distribution->base, VX_EXTERNAL);
        status = VX_SUCCESS;
    }
    else
    {
        VX_PRINT(VX_ZONE_ERROR, "Not a valid object!\n");
    }
    return status;
}
/* Thread entry point for asynchronous graph processing.
 * Pops graph work items from proc->input, runs vxProcessGraph on each, and
 * pushes the (graph, status) pair to proc->output until proc->running clears.
 * \param arg The vx_processor_t owning the input/output queues.
 * \return Always 0 (thread exit value). */
static vx_value_t vxWorkerGraph(void *arg)
{
    vx_processor_t *proc = (vx_processor_t *)arg;
    VX_PRINT(VX_ZONE_CONTEXT, "Starting thread!\n");
    /* NOTE(review): proc->running is presumably flipped by another thread;
     * no atomic/lock is visible here — confirm the shutdown protocol. */
    while (proc->running == vx_true_e)
    {
        vx_graph g = 0;
        vx_status s = VX_FAILURE;
        vx_value_set_t *data = NULL;
        /* Blocking read; returns vx_false_e when the queue is torn down. */
        if (vxReadQueue(&proc->input, &data) == vx_true_e)
        {
            g = (vx_graph)data->v1;
            // s = (vx_status)v2;
            VX_PRINT(VX_ZONE_CONTEXT, "Read graph=" VX_FMT_REF ", status=%d\n",g,s);
            s = vxProcessGraph(g);
            VX_PRINT(VX_ZONE_CONTEXT, "Writing graph=" VX_FMT_REF ", status=%d\n",g,s);
            /* Reuse the same value-set to report the result back. */
            data->v1 = (vx_value_t)g;
            data->v2 = (vx_status)s;
            if (vxWriteQueue(&proc->output, data) == vx_false_e)
                VX_PRINT(VX_ZONE_ERROR, "Failed to write graph=" VX_FMT_REF " status=%d\n", g, s);
        }
    }
    VX_PRINT(VX_ZONE_CONTEXT,"Stopping thread!\n");
    return 0;
}
/* Describes a parameter slot on a kernel's signature.
 * \param kernel The kernel being populated.
 * \param index  Parameter slot; must be below signature.numParams.
 * \param dir    VX_INPUT/VX_OUTPUT/VX_BIDIRECTIONAL.
 * \param type   The vx_type_e of the parameter.
 * \param state  Required/optional state of the parameter.
 * \return VX_SUCCESS, VX_ERROR_INVALID_PARAMETERS, or VX_ERROR_INVALID_REFERENCE. */
vx_status vxAddParameterToKernel(vx_kernel kernel, vx_uint32 index, vx_enum dir, vx_enum type, vx_enum state)
{
    vx_status status = VX_ERROR_INVALID_PARAMETERS;
    vx_kernel_t *kern = (vx_kernel_t *)kernel;
    VX_PRINT(VX_ZONE_KERNEL,"INFO: Adding index %u, type 0x%x, dir:%d state:%d\n", index, type, dir, state);
    if (vxIsValidSpecificReference(&kern->base, VX_TYPE_KERNEL) == vx_true_e)
    {
        if (index < kern->signature.numParams)
        {
            /* Tiling kernels only accept image and scalar parameters;
             * regular kernels accept any valid OpenVX type. The direction
             * and state checks are common to both. */
            vx_bool type_ok;
            if (kern->tiling_function)
                type_ok = ((type == VX_TYPE_IMAGE) || (type == VX_TYPE_SCALAR)) ? vx_true_e : vx_false_e;
            else
                type_ok = vxIsValidType(type);
            if ((type_ok == vx_false_e) ||
                (vxIsValidDirection(dir) == vx_false_e) ||
                (vxIsValidState(state) == vx_false_e))
            {
                status = VX_ERROR_INVALID_PARAMETERS;
            }
            else
            {
                kern->signature.directions[index] = dir;
                kern->signature.types[index] = type;
                kern->signature.states[index] = state;
                status = VX_SUCCESS;
            }
        }
        else
        {
            status = VX_ERROR_INVALID_PARAMETERS;
        }
    }
    else
    {
        VX_PRINT(VX_ZONE_ERROR, "Not a valid reference!\n");
        status = VX_ERROR_INVALID_REFERENCE;
    }
    return status;
}
/* Releases map table entry map_id: frees its staging buffer (if any) and
 * marks the slot unused. Serialized by the context's memory_maps_lock. */
VX_INT_API void vxMemoryUnmap(vx_context context, vx_uint32 map_id)
{
    /* take the table lock; bail out loudly if that fails */
    if (vxSemWait(&context->memory_maps_lock) != vx_true_e)
    {
        VX_PRINT(VX_ZONE_ERROR, "vxSemWait() failed!\n");
        return;
    }
    if (context->memory_maps[map_id].used == vx_true_e)
    {
        void *buffer = context->memory_maps[map_id].ptr;
        if (buffer != NULL)
        {
            /* release the mapped buffer and scrub the whole entry */
            free(buffer);
            memset(&context->memory_maps[map_id], 0, sizeof(vx_memory_map_t));
        }
        VX_PRINT(VX_ZONE_CONTEXT, "Removed memory mapping[%u]\n", map_id);
    }
    context->memory_maps[map_id].used = vx_false_e;
    /* done with the table */
    vxSemPost(&context->memory_maps_lock);
} /* vxMemoryUnmap() */
/* Releases an external reference to a kernel and NULLs the caller's handle.
 * \param kernel Pointer to the kernel handle; may be NULL or point to NULL.
 * Invalid handles are logged, never dereferenced. */
void vxReleaseKernel(vx_kernel *kernel)
{
    vx_kernel_t *kern = (vx_kernel_t *)(kernel?*kernel:0);
    /* Guard before forming &kern->base: computing a member address from a
     * null pointer is undefined behavior even if the member is at offset 0. */
    if (kern && vxIsValidSpecificReference(&kern->base, VX_TYPE_KERNEL) == vx_true_e)
    {
        VX_PRINT(VX_ZONE_KERNEL, "Releasing kernel "VX_FMT_REF"\n", (void *)kern);
        vxDecrementReference(&kern->base);
    }
    else
    {
        VX_PRINT(VX_ZONE_ERROR, "Invalid Reference!\n");
    }
    if (kernel)
        *kernel = 0;
}
static vx_pyramid vxCreatePyramidInt(vx_context context, vx_size levels, vx_float32 scale, vx_uint32 width, vx_uint32 height, vx_df_image format, vx_bool is_virtual) { vx_pyramid pyramid = NULL; if (vxIsValidContext(context) == vx_false_e) return NULL; if ((scale != VX_SCALE_PYRAMID_HALF) && (scale != VX_SCALE_PYRAMID_ORB)) { VX_PRINT(VX_ZONE_ERROR, "Invalid scale %lf for pyramid!\n",scale); vxAddLogEntry((vx_reference)context, VX_ERROR_INVALID_PARAMETERS, "Invalid scale %lf for pyramid!\n",scale); pyramid = (vx_pyramid_t *)vxGetErrorObject(context, VX_ERROR_INVALID_PARAMETERS); } else if (levels == 0 || levels > 8) { VX_PRINT(VX_ZONE_ERROR, "Invalid number of levels for pyramid!\n", levels); vxAddLogEntry((vx_reference)context, VX_ERROR_INVALID_PARAMETERS, "Invalid number of levels for pyramid!\n", levels); pyramid = (vx_pyramid_t *)vxGetErrorObject(context, VX_ERROR_INVALID_PARAMETERS); } else { pyramid = (vx_pyramid)vxCreateReference(context, VX_TYPE_PYRAMID, VX_EXTERNAL, &context->base); if (pyramid && pyramid->base.type == VX_TYPE_PYRAMID) { vx_status status; pyramid->base.is_virtual = is_virtual; status = vxInitPyramid(pyramid, levels, scale, width, height, format); if (status != VX_SUCCESS) { vxAddLogEntry((vx_reference)pyramid, status, "Failed to initialize pyramid\n"); vxReleasePyramid((vx_pyramid *)&pyramid); pyramid = (vx_pyramid_t *)vxGetErrorObject(context, status); } } else { VX_PRINT(VX_ZONE_ERROR, "Failed to allocate memory\n"); vxAddLogEntry((vx_reference)context, VX_ERROR_NO_MEMORY, "Failed to allocate memory\n"); pyramid = (vx_pyramid_t *)vxGetErrorObject(context, VX_ERROR_NO_MEMORY); } } return pyramid; }
/* Logs a one-line summary of a kernel (enum, enabled flag, name). */
void vxPrintKernel(vx_kernel_t *kernel)
{
    const char *enabled_str = kernel->enabled ? "TRUE" : "FALSE";
    VX_PRINT(VX_ZONE_KERNEL, "kernel[%u] enabled?=%s %s \n",
             kernel->enumeration,
             enabled_str,
             kernel->name);
}
vx_kernel vxTargetAddKernel(vx_target_t *target, vx_char name[VX_MAX_KERNEL_NAME], vx_enum enumeration, vx_kernel_f func_ptr, vx_uint32 numParams, vx_kernel_input_validate_f input, vx_kernel_output_validate_f output, vx_kernel_initialize_f initialize, vx_kernel_deinitialize_f deinitialize) { vx_uint32 k = 0u; vx_kernel_t *kernel = NULL; for (k = target->num_kernels; k < VX_INT_MAX_KERNELS; k++) { kernel = &(target->kernels[k]); if ((kernel->enabled == vx_false_e) && (kernel->enumeration == VX_KERNEL_INVALID)) { vxInitializeKernel(target->base.context, kernel, enumeration, func_ptr, name, NULL, numParams, input, output, initialize, deinitialize); VX_PRINT(VX_ZONE_KERNEL, "Reserving %s Kernel[%u] for %s\n", target->name, k, kernel->name); target->num_kernels++; break; } kernel = NULL; } return (vx_kernel)kernel; }
/* Convenience wrapper: maps an image for write-only access via map_vx_image. */
vx_status map_vx_image_for_write(vx_image vxImg, mem_info_t *mem_info)
{
    uint32_t img_width;
    uint32_t img_height;
    vx_status rc;
    rc = map_vx_image(vxImg, &img_width, &img_height, mem_info, VX_WRITE_ONLY);
    VX_PRINT(VX_ZONE_INFO, "map_vx_image_for_write returned %d\n", rc);
    return rc;
}
/* Logs the identity and refcounts of a reference; no-op on NULL. */
void vxPrintReference(vx_reference_t *ref)
{
    if (ref == NULL)
        return;
    VX_PRINT(VX_ZONE_REFERENCE,
             "vx_reference_t:%p magic:%08x type:%08x count:[%u,%u] context:%p\n",
             ref, ref->magic, ref->type,
             ref->external_count, ref->internal_count,
             ref->context);
}
/* Grants the caller access to a distribution's data.
 * If *ptr is NULL the internal pointer is handed out (zero-copy map);
 * otherwise the data is copied into the caller's buffer.
 * Takes an external reference that vxCommitDistribution must release.
 * \param distribution The distribution to access.
 * \param ptr   In/out pointer to the caller's buffer pointer (may be NULL to
 *              only take the reference).
 * \param usage Access mode (unused by this implementation).
 * \return VX_SUCCESS, or VX_FAILURE on an invalid object / allocation failure. */
VX_API_ENTRY vx_status VX_API_CALL vxAccessDistribution(vx_distribution distribution, void **ptr, vx_enum usage)
{
    vx_status status = VX_FAILURE;
    if ((vxIsValidSpecificReference(&distribution->base, VX_TYPE_DISTRIBUTION) == vx_true_e) &&
        (vxAllocateMemory(distribution->base.context, &distribution->memory) == vx_true_e))
    {
        if (ptr != NULL)
        {
            vxSemWait(&distribution->base.lock);
            {
                vx_size size = vxComputeMemorySize(&distribution->memory, 0);
                vxPrintMemory(&distribution->memory);
                if (*ptr == NULL)
                {
                    /* zero-copy: expose the internal buffer */
                    *ptr = distribution->memory.ptrs[0];
                }
                else
                {
                    /* caller-supplied buffer: copy out.
                     * (Original code re-tested *ptr != NULL here, which is
                     * always true in this branch — condition removed.) */
                    memcpy(*ptr, distribution->memory.ptrs[0], size);
                }
            }
            vxSemPost(&distribution->base.lock);
            vxReadFromReference(&distribution->base);
        }
        /* balanced by vxDecrementReference in vxCommitDistribution */
        vxIncrementReference(&distribution->base, VX_EXTERNAL);
        status = VX_SUCCESS;
    }
    else
    {
        VX_PRINT(VX_ZONE_ERROR, "Not a valid object!\n");
    }
    return status;
}
/* Creates a 1D distribution (histogram) object.
 * \param context Valid OpenVX context.
 * \param numBins Number of bins; must be non-zero.
 * \param offset  Value of the first bin's lower bound.
 * \param range   Total value range covered; must be non-zero.
 * \return The distribution, or an error object on bad parameters.
 * Note: backing memory is lazily allocated on first access, not here. */
VX_API_ENTRY vx_distribution VX_API_CALL vxCreateDistribution(vx_context context, vx_size numBins, vx_int32 offset, vx_uint32 range)
{
    vx_distribution distribution = NULL;
    if (vxIsValidContext(context) == vx_true_e)
    {
        if ((numBins != 0) && (range != 0))
        {
            distribution = (vx_distribution)vxCreateReference(context, VX_TYPE_DISTRIBUTION, VX_EXTERNAL, &context->base);
            if ( vxGetStatus((vx_reference)distribution) == VX_SUCCESS &&
                 distribution->base.type == VX_TYPE_DISTRIBUTION)
            {
                /* Describe the memory as a 1 x numBins array of vx_int32 bins. */
                distribution->memory.ndims = 2;
                distribution->memory.nptrs = 1;
                distribution->memory.strides[0][VX_DIM_C] = sizeof(vx_int32);
                distribution->memory.dims[0][VX_DIM_C] = 1;
                distribution->memory.dims[0][VX_DIM_X] = (vx_int32)numBins;
                distribution->memory.dims[0][VX_DIM_Y] = 1;
                distribution->memory.cl_type = CL_MEM_OBJECT_BUFFER;
                /* window_x is the integer width of each bin in value space;
                 * NOTE(review): integer division truncates when range is not a
                 * multiple of numBins — confirm intended rounding. */
                distribution->window_x = (vx_uint32)range/(vx_uint32)numBins;
                distribution->window_y = 1;
                distribution->offset_x = offset;
                distribution->offset_y = 0;
            }
        }
        else
        {
            VX_PRINT(VX_ZONE_ERROR, "Invalid parameters to distribution\n");
            vxAddLogEntry(&context->base, VX_ERROR_INVALID_PARAMETERS, "Invalid parameters to distribution\n");
            distribution = (vx_distribution)vxGetErrorObject(context, VX_ERROR_INVALID_PARAMETERS);
        }
    }
    return distribution;
}
/* Checks whether an enum value names a known OpenVX type (scalar, struct,
 * object, or — with the XML extension — import).
 * \param type The vx_type_e candidate.
 * \return vx_true_e if valid, vx_false_e otherwise. */
VX_INT_API vx_bool vxIsValidType(vx_enum type)
{
    vx_bool ret = vx_false_e;
    if (type <= VX_TYPE_INVALID)
    {
        ret = vx_false_e;
    }
    else if (VX_TYPE_IS_SCALAR(type)) /* some scalar */
    {
        ret = vx_true_e;
    }
    else if (VX_TYPE_IS_STRUCT(type)) /* some struct */
    {
        ret = vx_true_e;
    }
    else if (VX_TYPE_IS_OBJECT(type)) /* some object */
    {
        ret = vx_true_e;
    }
#ifdef OPENVX_KHR_XML
    else if (type == VX_TYPE_IMPORT) /* import type extension */
    {
        ret = vx_true_e;
    }
#endif
    else
    {
        /* BUG FIX: the format string has a %08x specifier but the original
         * call passed no argument — undefined behavior. Pass 'type'. */
        VX_PRINT(VX_ZONE_ERROR, "Type 0x%08x is invalid!\n", type);
    }
    return ret; /* otherwise, not a valid type */
}
/* Creates a look-up table backed by an internal array object.
 * \param context   Valid OpenVX context.
 * \param data_type VX_TYPE_UINT8 (strict 1.0 requires count == 256), or
 *                  VX_TYPE_UINT16 when not building strict 1.0.
 * \param count     Number of LUT entries.
 * \return The LUT, or an error object on invalid type/count. */
VX_API_ENTRY vx_lut VX_API_CALL vxCreateLUT(vx_context context, vx_enum data_type, vx_size count)
{
    vx_lut_t *lut = NULL;
    if (vxIsValidContext(context) == vx_true_e)
    {
        if (data_type == VX_TYPE_UINT8)
        {
#if defined(OPENVX_STRICT_1_0)
            /* Strict 1.0 conformance pins U8 LUTs at exactly 256 entries. */
            if (count != 256)
            {
                VX_PRINT(VX_ZONE_ERROR, "Invalid parameter to LUT\n");
                vxAddLogEntry(&context->base, VX_ERROR_INVALID_PARAMETERS, "Invalid parameter to LUT\n");
                lut = (vx_lut_t *)vxGetErrorObject(context, VX_ERROR_INVALID_PARAMETERS);
            }
            else
#endif
            {
                /* LUT is implemented as a fixed-capacity array of U8. */
                lut = (vx_lut_t *)vxCreateArrayInt(context, VX_TYPE_UINT8, count, vx_false_e, VX_TYPE_LUT);
                if (vxGetStatus((vx_reference)lut) == VX_SUCCESS && lut->base.type == VX_TYPE_LUT)
                {
                    lut->num_items = count;
                    vxPrintArray(lut);
                }
            }
        }
#if !defined(OPENVX_STRICT_1_0)
        else if (data_type == VX_TYPE_UINT16)
        {
            lut = (vx_lut_t *)vxCreateArrayInt(context, VX_TYPE_UINT16, count, vx_false_e, VX_TYPE_LUT);
            if (vxGetStatus((vx_reference)lut) == VX_SUCCESS && lut->base.type == VX_TYPE_LUT)
            {
                lut->num_items = count;
                vxPrintArray(lut);
            }
        }
#endif
        else
        {
            VX_PRINT(VX_ZONE_ERROR, "Invalid data type\n");
            vxAddLogEntry(&context->base, VX_ERROR_INVALID_TYPE, "Invalid data type\n");
            lut = (vx_lut_t *)vxGetErrorObject(context, VX_ERROR_INVALID_TYPE);
        }
    }
    return (vx_lut)lut;
}
/* Output validator for the ChannelExtract node (parameter index 2).
 * Derives the output U8 image dimensions from the input image format and the
 * requested channel: chroma planes of subsampled formats shrink accordingly.
 * \return VX_SUCCESS when the meta format was filled, else
 *         VX_ERROR_INVALID_PARAMETERS. */
static vx_status VX_CALLBACK vxChannelExtractOutputValidator(vx_node node, vx_uint32 index, vx_meta_format_t *ptr)
{
    vx_status status = VX_ERROR_INVALID_PARAMETERS;
    if (index == 2)
    {
        vx_parameter param0 = vxGetParameterByIndex(node, 0);
        vx_parameter param1 = vxGetParameterByIndex(node, 1);
        if ((param0) && (param1))
        {
            vx_image input = 0;
            vx_scalar chan = 0;
            vx_enum channel = 0;
            vxQueryParameter(param0, VX_PARAMETER_ATTRIBUTE_REF, &input, sizeof(input));
            vxQueryParameter(param1, VX_PARAMETER_ATTRIBUTE_REF, &chan, sizeof(chan));
            if ((input) && (chan))
            {
                vx_uint32 width = 0, height = 0;
                vx_df_image format = VX_DF_IMAGE_VIRT;
                /* read the scalar only once we know the handle is valid */
                vxReadScalarValue(chan, &channel);
                vxQueryImage(input, VX_IMAGE_ATTRIBUTE_WIDTH, &width, sizeof(width));
                vxQueryImage(input, VX_IMAGE_ATTRIBUTE_HEIGHT, &height, sizeof(height));
                vxQueryImage(input, VX_IMAGE_ATTRIBUTE_FORMAT, &format, sizeof(format));
                /* Chroma channels of subsampled YUV formats are smaller than luma. */
                if (channel != VX_CHANNEL_Y)
                {
                    switch (format)
                    {
                        case VX_DF_IMAGE_IYUV:
                        case VX_DF_IMAGE_NV12:
                        case VX_DF_IMAGE_NV21:
                            /* 4:2:0 — both dimensions halved */
                            width /= 2;
                            height /= 2;
                            break;
                        case VX_DF_IMAGE_YUYV:
                        case VX_DF_IMAGE_UYVY:
                            /* 4:2:2 — width halved only */
                            width /= 2;
                            break;
                        default:
                            /* planar/interleaved full-resolution formats keep size */
                            break;
                    }
                }
                ptr->type = VX_TYPE_IMAGE;
                ptr->dim.image.format = VX_DF_IMAGE_U8;
                ptr->dim.image.width = width;
                ptr->dim.image.height = height;
                status = VX_SUCCESS;
            }
            /* BUG FIX: release each reference whenever it was obtained — the
             * original leaked input/chan unless BOTH were non-NULL. */
            if (input)
                vxReleaseImage(&input);
            if (chan)
                vxReleaseScalar(&chan);
        }
        /* BUG FIX: release each parameter individually — the original leaked
         * one parameter when only the other was NULL. */
        if (param0)
            vxReleaseParameter(&param0);
        if (param1)
            vxReleaseParameter(&param1);
    }
    else
    {
        status = VX_ERROR_INVALID_PARAMETERS;
    }
    VX_PRINT(VX_ZONE_API, "%s:%u returned %d\n", __FUNCTION__, index, status);
    return status;
}
/* OpenCL platform error callback: forwards the driver's message to the log.
 * The context payload (private_info/cb/user_data) is currently unused. */
static void VX_CALLBACK vxcl_platform_notifier(const char *errinfo,
                                               const void *private_info,
                                               size_t cb,
                                               void *user_data)
{
    //vx_target target = (vx_target)user_data;
    (void)private_info;
    (void)cb;
    VX_PRINT(VX_ZONE_ERROR, "%s\n", errinfo);
}
/* Looks up a kernel by name across all targets in priority order.
 * On a hit, pins the kernel's affinity to the matching target and takes a
 * reference that the caller must release.
 * \param c    The context to search.
 * \param name NUL-terminated kernel name (compared up to VX_MAX_KERNEL_NAME).
 * \return The referenced kernel, or NULL if not found / context invalid. */
vx_kernel vxGetKernelByName(vx_context c, vx_char *name)
{
    vx_kernel_t *kern = NULL;
    vx_context_t *context = (vx_context_t *)c;
    if (vxIsValidContext(context) == vx_true_e)
    {
        vx_uint32 k = 0u, t = 0u;
        VX_PRINT(VX_ZONE_KERNEL, "Scanning for kernel %s out of %d kernels\n", name, context->numKernels);
        /* Targets are visited in priority order so the best target wins. */
        for (t = 0; t < context->numTargets; t++)
        {
            vx_target_t *target = &context->targets[context->priority_targets[t]];
            for (k = 0; k < target->numKernels; k++)
            {
                vx_kernel_t *kernel = &target->kernels[k];
                vxPrintKernel(kernel);
                if ((kernel->enabled == vx_true_e) &&
                    (strncmp(kernel->name, name, VX_MAX_KERNEL_NAME) == 0))
                {
                    /* remember which target matched for later scheduling */
                    kernel->affinity = context->priority_targets[t];
                    kern = kernel;
                    vxIncrementReference(&kern->base);
                    break;
                }
                kernel = NULL;
            }
            if (kern != NULL)
                break;
        }
    }
    else
    {
        VX_PRINT(VX_ZONE_ERROR, "Invalid context %p\n", context);
    }
    if (kern == NULL)
    {
        VX_PRINT(VX_ZONE_ERROR, "Failed to find kernel %s\n", name);
    }
    else
    {
        VX_PRINT(VX_ZONE_KERNEL,"Found Kernel enum %d, name %s\n", kern->enumeration, kern->name);
    }
    return (vx_kernel)kern;
}
/* Bumps a reference's internal (framework-owned) count under its lock.
 * No-op on NULL. */
void vxIncrementIntReference(vx_reference_t *ref)
{
    if (ref == NULL)
        return;
    vxSemWait(&ref->lock);
    ref->internal_count++;
    VX_PRINT(VX_ZONE_REFERENCE,
             "Incremented Internal Reference Count to %u on "VX_FMT_REF"\n",
             ref->internal_count, ref);
    vxSemPost(&ref->lock);
}
/* Looks up a kernel by its enumeration across all targets in priority order.
 * On a hit, pins the kernel's affinity to the matching target and takes a
 * reference that the caller must release.
 * \param c          The context to search.
 * \param kernelenum The kernel enum; must be greater than VX_KERNEL_INVALID.
 * \return The referenced kernel, or NULL if not found / context invalid. */
vx_kernel vxGetKernelByEnum(vx_context c, vx_enum kernelenum)
{
    vx_kernel_t *kern = NULL;
    vx_context_t *context = (vx_context_t *)c;
    vxPrintReference(&context->base);
    if (vxIsValidContext(context) == vx_true_e)
    {
        if (VX_KERNEL_INVALID >= kernelenum)
        {
            vxAddLogEntry(c, VX_ERROR_INVALID_PARAMETERS, "Invalid kernel enumeration (%d)\n", kernelenum);
        }
        else if (kernelenum > VX_KERNEL_INVALID) // no upper bound for kernel enum
        {
            vx_uint32 k = 0u, t = 0u;
            VX_PRINT(VX_ZONE_KERNEL,"Scanning for kernel enum %d out of %d kernels\n", kernelenum, context->numKernels);
            /* Targets are visited in priority order so the best target wins. */
            for (t = 0; t < context->numTargets; t++)
            {
                vx_target_t *target = &context->targets[context->priority_targets[t]];
                VX_PRINT(VX_ZONE_KERNEL, "Checking Target[%u]=%s for %u kernels\n", context->priority_targets[t], target->name, target->numKernels);
                for (k = 0; k < target->numKernels; k++)
                {
                    vx_kernel_t *kernel = &target->kernels[k];
                    if (kernel->enumeration == kernelenum)
                    {
                        /* remember which target matched for later scheduling */
                        kernel->affinity = context->priority_targets[t];
                        kern = kernel;
                        vxIncrementReference(&kern->base);
                        VX_PRINT(VX_ZONE_KERNEL,"Found Kernel[%u] enum:%d name:%s in target[%u]=%s\n", k, kernelenum, kern->name, context->priority_targets[t], target->name);
                        break;
                    }
                    kernel = NULL;
                }
                if (kern != NULL)
                    break;
            }
        }
    }
    else
    {
        VX_PRINT(VX_ZONE_ERROR, "Invalid context %p\n", context);
    }
    return (vx_kernel)kern;
}
/* Drops a reference's internal (framework-owned) count under its lock.
 * \return vx_true_e if the count was decremented; vx_false_e on NULL or
 *         when the count was already zero (logged as a warning). */
vx_bool vxDecrementIntReference(vx_reference_t *ref)
{
    vx_bool decremented = vx_false_e;
    if (ref == NULL)
        return decremented;
    vxSemWait(&ref->lock);
    if (ref->internal_count != 0)
    {
        ref->internal_count--;
        VX_PRINT(VX_ZONE_REFERENCE,
                 "Decremented Internal Reference Count to %u on "VX_FMT_REF"\n",
                 ref->internal_count, ref);
        decremented = vx_true_e;
    }
    else
    {
        /* underflow attempt: loudly flag the bookkeeping bug */
        VX_PRINT(VX_ZONE_WARNING,
                 "#### INTERNAL REF COUNT IS ALREADY ZERO!!! "VX_FMT_REF" type:%08x #####\n",
                 ref, ref->type);
    }
    vxSemPost(&ref->lock);
    return decremented;
}
/* One-time FastCV library initialization: selects the CPU-performance
 * operation mode and logs the library version.
 * \return Always VX_SUCCESS (fcv calls report no errors here). */
vx_status vxFastCVInit()
{
    char sVersion[32];
    fcvSetOperationMode( (fcvOperationMode) FASTCV_OP_CPU_PERFORMANCE );
    /* use sizeof rather than repeating the magic buffer length */
    fcvGetVersion(sVersion, sizeof(sVersion));
    /* FIX: log message read "version version" (duplicated word) */
    VX_PRINT(VX_ZONE_INFO, "Using FastCV version %s\n", sVersion);
    return VX_SUCCESS;
}
/* Threadpool work item: executes one node on its assigned target.
 * Unpacks (target, node, action) from the worker's value set, opens access to
 * the node's virtual parameters, runs the target's process function, and
 * stores the resulting action back into worker->data->v3.
 * \return vx_false_e if the action is VX_ACTION_ABANDON, else vx_true_e. */
static vx_bool vxWorkerNode(vx_threadpool_worker_t *worker)
{
    vx_bool ret = vx_true_e;
    vx_target target = (vx_target)worker->data->v1;
    vx_node node = (vx_node)worker->data->v2;
    vx_action action = (vx_action)worker->data->v3;
    vx_uint32 p = 0;
    /* turn on access to virtual memory */
    for (p = 0u; p < node->kernel->signature.num_parameters; p++)
    {
        if (node->parameters[p] == NULL) continue;
        if (node->parameters[p]->is_virtual == vx_true_e)
        {
            node->parameters[p]->is_accessible = vx_true_e;
        }
    }
    VX_PRINT(VX_ZONE_GRAPH, "Executing %s on target %s\n", node->kernel->name, target->name);
    action = target->funcs.process(target, &node, 0, 1);
    VX_PRINT(VX_ZONE_GRAPH, "Executed %s on target %s with action %d returned\n", node->kernel->name, target->name, action);
    /* turn on access to virtual memory */
    /* NOTE(review): this second pass sets is_accessible = vx_true_e again,
     * but the surrounding comments talk about the flag being "lowered" after
     * execution — this looks like it may have been intended to be vx_false_e.
     * Left as-is; confirm against the reference implementation. */
    for (p = 0u; p < node->kernel->signature.num_parameters; p++)
    {
        if (node->parameters[p] == NULL) continue;
        if (node->parameters[p]->is_virtual == vx_true_e)
        {
            // determine who is the last thread to release...
            // if this is an input, then there should be "zero" rectangles, which
            // should allow commits to work, even if the flag is lowered.
            // if this is an output, there should only be a single writer, so
            // no locks are needed. Bidirectional is not allowed to be virtual.
            node->parameters[p]->is_accessible = vx_true_e;
        }
    }
    if (action == VX_ACTION_ABANDON)
    {
        ret = vx_false_e;
    }
    // collect the specific results.
    worker->data->v3 = (vx_value_t)action;
    return ret;
}
/* Sets a writable attribute on a convolution object.
 * Only VX_CONVOLUTION_ATTRIBUTE_SCALE is writable, and its value must be a
 * power of two.
 * \return VX_SUCCESS, VX_ERROR_INVALID_REFERENCE, VX_ERROR_INVALID_VALUE,
 *         or VX_ERROR_INVALID_PARAMETERS. */
vx_status vxSetConvolutionAttribute(vx_convolution conv, vx_enum attr, void *ptr, vx_size size)
{
    vx_convolution_t *convolution = (vx_convolution_t *)conv;
    vx_status status = VX_SUCCESS;
    if (vxIsValidSpecificReference(&convolution->base.base, VX_TYPE_CONVOLUTION) == vx_false_e)
        return VX_ERROR_INVALID_REFERENCE;
    switch (attr)
    {
        case VX_CONVOLUTION_ATTRIBUTE_SCALE:
            if (!VX_CHECK_PARAM(ptr, size, vx_uint32, 0x3))
            {
                status = VX_ERROR_INVALID_PARAMETERS;
                break;
            }
            {
                vx_uint32 requested = *(vx_uint32 *)ptr;
                /* the spec restricts the scale divisor to powers of two */
                if (vxIsPowerOfTwo(requested) == vx_true_e)
                {
                    VX_PRINT(VX_ZONE_INFO, "Convolution Scale assigned to %u\n", requested);
                    convolution->scale = requested;
                }
                else
                {
                    status = VX_ERROR_INVALID_VALUE;
                }
            }
            break;
        default:
            /* no other attribute is writable */
            status = VX_ERROR_INVALID_PARAMETERS;
            break;
    }
    if (status != VX_SUCCESS)
    {
        VX_PRINT(VX_ZONE_ERROR, "Failed to set attribute on convolution! (%d)\n", status);
    }
    return status;
}
/* Executes a run of nodes on this target, in order, until a node fails or a
 * callback redirects the graph.
 * \param target     The executing target (unused directly; nodes carry affinity).
 * \param nodes      Array of node pointers to run.
 * \param startIndex First index to execute.
 * \param numNodes   How many nodes to execute from startIndex.
 * \return VX_ACTION_CONTINUE on success, the callback's action if one is
 *         attached, or VX_ACTION_ABANDON on the first node error. */
vx_action vxTargetProcess(vx_target_t *target, vx_node_t *nodes[], vx_size startIndex, vx_size numNodes)
{
    vx_action action = VX_ACTION_CONTINUE;
    vx_status status = VX_SUCCESS;
    vx_size n = 0;
    /* stop early as soon as any node changes the action away from CONTINUE */
    for (n = startIndex; (n < (startIndex + numNodes)) && (action == VX_ACTION_CONTINUE); n++)
    {
        VX_PRINT(VX_ZONE_GRAPH,"Executing Kernel %s:%d in Nodes[%u] on target %s\n",
            nodes[n]->kernel->name,
            nodes[n]->kernel->enumeration,
            n,
            nodes[n]->base.context->targets[nodes[n]->affinity].name);
        /* performance capture brackets the kernel invocation */
        vxStartCapture(&nodes[n]->perf);
        status = nodes[n]->kernel->function((vx_node)nodes[n],
                                            (vx_reference *)nodes[n]->parameters,
                                            nodes[n]->kernel->signature.num_parameters);
        nodes[n]->executed = vx_true_e;
        nodes[n]->status = status;
        vxStopCapture(&nodes[n]->perf);
        VX_PRINT(VX_ZONE_GRAPH,"kernel %s returned %d\n", nodes[n]->kernel->name, status);
        if (status == VX_SUCCESS)
        {
            /* call the callback if it is attached */
            if (nodes[n]->callback)
            {
                action = nodes[n]->callback((vx_node)nodes[n]);
                VX_PRINT(VX_ZONE_GRAPH,"callback returned action %d\n", action);
            }
        }
        else
        {
            /* first failing node aborts the remainder of the run */
            action = VX_ACTION_ABANDON;
            VX_PRINT(VX_ZONE_ERROR, "Abandoning Graph due to error (%d)!\n", status);
        }
    }
    return action;
}
/* Phase kernel: computes per-pixel gradient direction from S16 x/y gradients,
 * mapping the angle range [0, 2*pi) onto a U8 output [0, 255].
 * \param node       The executing node (unused).
 * \param parameters [0]=grad_x (S16), [1]=grad_y (S16), [2]=output (U8).
 * \param num        Must be 3.
 * \return VX_SUCCESS, or an accumulated access/commit error status. */
static vx_status vxPhaseKernel(vx_node node, vx_reference *parameters, vx_uint32 num)
{
    vx_status status = VX_FAILURE;
    if (num == 3)
    {
        vx_image grad_x = (vx_image)parameters[0];
        vx_image grad_y = (vx_image)parameters[1];
        vx_image output = (vx_image)parameters[2];
        vx_uint32 y, x;
        vx_uint8 *dst_base = NULL;
        vx_int16 *src_base_x = NULL;
        vx_int16 *src_base_y = NULL;
        vx_imagepatch_addressing_t dst_addr, src_addr_x, src_addr_y;
        vx_rectangle rect;
        /* BUG FIX: was '&&', which let a single NULL input through to be
         * dereferenced below. Either missing input is invalid. */
        if (grad_x == 0 || grad_y == 0)
            return VX_ERROR_INVALID_PARAMETERS;
        rect = vxGetValidRegionImage(grad_x);
        status = VX_SUCCESS;
        status |= vxAccessImagePatch(grad_x, rect, 0, &src_addr_x, (void **)&src_base_x);
        status |= vxAccessImagePatch(grad_y, rect, 0, &src_addr_y, (void **)&src_base_y);
        status |= vxAccessImagePatch(output, rect, 0, &dst_addr, (void **)&dst_base);
        for (y = 0; y < dst_addr.dim_y; y++)
        {
            for (x = 0; x < dst_addr.dim_x; x++)
            {
                vx_int16 *in_x = vxFormatImagePatchAddress2d(src_base_x, x, y, &src_addr_x);
                vx_int16 *in_y = vxFormatImagePatchAddress2d(src_base_y, x, y, &src_addr_y);
                vx_uint8 *dst = vxFormatImagePatchAddress2d(dst_base, x, y, &dst_addr);
                /* -M_PI to M_PI */
                double arct = atan2((double)in_y[0],(double)in_x[0]);
                /* shift negative angles up into [0, 2*pi) */
                double norm = arct;
                if (arct < 0.0)
                {
                    norm = VX_TAU + arct;
                }
                /* BUG FIX: normalize to 0.0 - 1.0 as the original comment
                 * promised; without this, norm*255 exceeded 255 and wrapped
                 * through the & 0xFF mask. */
                norm = norm / VX_TAU;
                /* 0 - 255 */
                *dst = (vx_uint8)((vx_uint32)(norm * 255u) & 0xFFu);
                if (in_y[0] != 0 || in_x[0] != 0)
                {
                    VX_PRINT(VX_ZONE_INFO, "atan2(%d,%d) = %lf [norm=%lf] dst=%02x\n", in_y[0], in_x[0], arct, norm, *dst);
                }
            }
        }
        /* inputs are committed with a NULL region (no write-back); only the
         * output commits its data */
        status |= vxCommitImagePatch(grad_x, 0, 0, &src_addr_x, src_base_x);
        status |= vxCommitImagePatch(grad_y, 0, 0, &src_addr_y, src_base_y);
        status |= vxCommitImagePatch(output, rect, 0, &dst_addr, dst_base);
        vxReleaseRectangle(&rect);
    }
    return status;
}
/* Compares an expected type enum with a supplied one, logging mismatches.
 * \return vx_true_e when they are equal, vx_false_e otherwise. */
vx_bool vxIsValidTypeMatch(vx_enum expected, vx_enum supplied)
{
    vx_bool match = (expected == supplied) ? vx_true_e : vx_false_e;
    if (match == vx_false_e)
    {
        VX_PRINT(VX_ZONE_ERROR, "Expected %08x and got %08x!\n", expected, supplied);
    }
    return match;
}
/* Returns the index-th reference held by an import object.
 * Takes an external reference that the caller must release.
 * \param import The import object (XML extension).
 * \param index  Must be below import->count.
 * \return The referenced object, an error object for a bad index, or NULL
 *         for an invalid import. */
VX_API_ENTRY vx_reference VX_API_CALL vxGetReferenceByIndex(vx_import import, vx_uint32 index)
{
    vx_reference ref = NULL;
    if (import && import->base.type == VX_TYPE_IMPORT)
    {
        if (index < import->count)
        {
            ref = (vx_reference_t *)import->refs[index];
            /* caller owns this reference and must release it */
            vxIncrementReference(ref, VX_EXTERNAL);
        }
        else
        {
            VX_PRINT(VX_ZONE_ERROR, "Incorrect index value\n");
            vxAddLogEntry(&import->base.context->base, VX_ERROR_INVALID_PARAMETERS, "Incorrect index value\n");
            ref = (vx_reference_t *)vxGetErrorObject(import->base.context, VX_ERROR_INVALID_PARAMETERS);
        }
    }
    else
    {
        VX_PRINT(VX_ZONE_ERROR, "Invalid import reference!\n");
    }
    return ref;
}
/* Commits externally-held LUT data back through the underlying array object.
 * \param l   The LUT being committed.
 * \param ptr The external buffer previously obtained via vxAccessLUT.
 * \return The array commit status, or VX_FAILURE for an invalid LUT. */
VX_API_ENTRY vx_status VX_API_CALL vxCommitLUT(vx_lut l, const void *ptr)
{
    vx_lut_t *lut = (vx_lut_t *)l;
    if (vxIsValidSpecificReference(&lut->base, VX_TYPE_LUT) != vx_true_e)
    {
        VX_PRINT(VX_ZONE_ERROR, "Not a valid object!\n");
        return VX_FAILURE;
    }
    /* a LUT is array-backed; delegate the range commit to the array code */
    return vxCommitArrayRangeInt((vx_array_t *)l, 0, lut->num_items, ptr);
}
/* Validates a generic (non-context) reference: magic word, known non-context
 * type, and a valid owning context.
 * \return vx_true_e when valid, vx_false_e otherwise (reason is logged). */
vx_bool vxIsValidReference(vx_reference_t * ref)
{
    if (ref == NULL)
    {
        VX_PRINT(VX_ZONE_ERROR, "Reference was NULL\n");
        return vx_false_e;
    }
    vxPrintReference(ref);
    if ((ref->magic == VX_MAGIC) &&
        (vxIsValidType(ref->type) && ref->type != VX_TYPE_CONTEXT) &&
        (vxIsValidContext(ref->context) == vx_true_e))
    {
        return vx_true_e;
    }
    VX_PRINT(VX_ZONE_ERROR, "%p is not a valid reference!\n", ref);
    return vx_false_e;
}
/* Validates a reference against an exact expected type: magic word, matching
 * type enum, and a valid owning context.
 * \param ref  The reference to check (NULL yields a warning, not an error).
 * \param type The exact vx_type_e the reference must have.
 * \return vx_true_e when valid, vx_false_e otherwise. */
vx_bool vxIsValidSpecificReference(vx_reference_t * ref, vx_enum type)
{
    if (ref == NULL)
    {
        VX_PRINT(VX_ZONE_WARNING, "Reference was NULL\n");
        return vx_false_e;
    }
    vxPrintReference(ref);
    if ((ref->magic == VX_MAGIC) &&
        (ref->type == type) &&
        (vxIsValidContext(ref->context) == vx_true_e))
    {
        return vx_true_e;
    }
    VX_PRINT(VX_ZONE_ERROR, "%p is not a valid reference!\n", ref);
    return vx_false_e;
}