/* Gives the caller access to a distribution's backing memory.
 * If *ptr is NULL the internal buffer pointer is handed out (map); otherwise
 * the data is copied into the caller-supplied buffer. On success the external
 * reference count is incremented (caller must balance with a commit/release).
 * NOTE(review): `usage` is accepted for API symmetry but not consulted here.
 * Returns VX_SUCCESS, or VX_FAILURE for an invalid reference / failed alloc. */
VX_API_ENTRY vx_status VX_API_CALL vxAccessDistribution(vx_distribution distribution, void **ptr, vx_enum usage)
{
    vx_status status = VX_FAILURE;
    if ((vxIsValidSpecificReference(&distribution->base, VX_TYPE_DISTRIBUTION) == vx_true_e) &&
        (vxAllocateMemory(distribution->base.context, &distribution->memory) == vx_true_e))
    {
        if (ptr != NULL)
        {
            vxSemWait(&distribution->base.lock);
            {
                vx_size size = vxComputeMemorySize(&distribution->memory, 0);
                vxPrintMemory(&distribution->memory);
                if (*ptr == NULL)
                {
                    /* map: expose the internal buffer directly */
                    *ptr = distribution->memory.ptrs[0];
                }
                else
                {
                    /* copy into the caller's buffer (fix: was a redundant
                     * `else if (*ptr != NULL)` — a plain else is equivalent) */
                    memcpy(*ptr, distribution->memory.ptrs[0], size);
                }
            }
            vxSemPost(&distribution->base.lock);
            vxReadFromReference(&distribution->base);
        }
        vxIncrementReference(&distribution->base, VX_EXTERNAL);
        status = VX_SUCCESS;
    }
    else
    {
        VX_PRINT(VX_ZONE_ERROR, "Not a valid object!\n");
    }
    return status;
}
/* Creates a parameter object describing kernel parameter `index`.
 * Returns an error object when the index is out of range, or NULL when the
 * kernel reference itself is invalid. The returned parameter holds an
 * internal reference on the kernel. */
VX_API_ENTRY vx_parameter VX_API_CALL vxGetKernelParameterByIndex(vx_kernel kernel, vx_uint32 index)
{
    vx_parameter parameter = NULL;
    if (vxIsValidSpecificReference(&kernel->base, VX_TYPE_KERNEL) == vx_true_e)
    {
        if (index < VX_INT_MAX_PARAMS && index < kernel->signature.num_parameters)
        {
            parameter = (vx_parameter)vxCreateReference(kernel->base.context, VX_TYPE_PARAMETER, VX_EXTERNAL, &kernel->base.context->base);
            if (parameter && parameter->base.type == VX_TYPE_PARAMETER)
            {
                parameter->index = index;
                parameter->node = NULL; /* kernel parameters have no owning node */
                parameter->kernel = kernel;
                /* fix: '&parameter' had been corrupted to the mojibake '¶meter' */
                vxIncrementReference(&parameter->kernel->base, VX_INTERNAL);
            }
        }
        else
        {
            vxAddLogEntry(&kernel->base, VX_ERROR_INVALID_PARAMETERS, "Index %u out of range for node %s (numparams = %u)!\n", index, kernel->name, kernel->signature.num_parameters);
            parameter = (vx_parameter_t *)vxGetErrorObject(kernel->base.context, VX_ERROR_INVALID_PARAMETERS);
        }
    }
    return parameter;
}
/* Creates a parameter object describing node parameter `index`.
 * Returns NULL for an invalid node reference; an error object when the node
 * has no kernel or the index is out of range. The returned parameter holds
 * internal references on both the node and its kernel. */
VX_API_ENTRY vx_parameter VX_API_CALL vxGetParameterByIndex(vx_node node, vx_uint32 index)
{
    vx_parameter param = NULL;
    if (vxIsValidSpecificReference(&node->base, VX_TYPE_NODE) == vx_false_e)
    {
        return param;
    }
    if (node->kernel == NULL)
    {
        /* this can probably never happen */
        vxAddLogEntry(&node->base, VX_ERROR_INVALID_NODE, "Node was created without a kernel! Fatal Error!\n");
        param = (vx_parameter_t *)vxGetErrorObject(node->base.context, VX_ERROR_INVALID_NODE);
    }
    else
    {
        if (index < VX_INT_MAX_PARAMS && index < node->kernel->signature.num_parameters)
        {
            param = (vx_parameter)vxCreateReference(node->base.context, VX_TYPE_PARAMETER, VX_EXTERNAL, &node->base);
            if (param && param->base.type == VX_TYPE_PARAMETER)
            {
                param->index = index;
                param->node = node;
                /* fix: '&param' had been corrupted to the mojibake '¶m' (twice) */
                vxIncrementReference(&param->node->base, VX_INTERNAL);
                param->kernel = node->kernel;
                vxIncrementReference(&param->kernel->base, VX_INTERNAL);
            }
        }
        else
        {
            vxAddLogEntry(&node->base, VX_ERROR_INVALID_PARAMETERS, "Index %u out of range for node %s (numparams = %u)!\n", index, node->kernel->name, node->kernel->signature.num_parameters);
            param = (vx_parameter_t *)vxGetErrorObject(node->base.context, VX_ERROR_INVALID_PARAMETERS);
        }
    }
    VX_PRINT(VX_ZONE_API, "%s: returning %p\n", __FUNCTION__, param);
    return param;
}
/* Returns the image at the requested pyramid level, or 0 when the pyramid
 * reference is invalid or the index is out of range. On success the image's
 * external reference count is incremented; the caller must release it. */
VX_API_ENTRY vx_image VX_API_CALL vxGetPyramidLevel(vx_pyramid pyramid, vx_uint32 index)
{
    vx_image level = 0;
    if (vxIsValidSpecificReference(&pyramid->base, VX_TYPE_PYRAMID) != vx_true_e)
    {
        return level;
    }
    if (index < pyramid->numLevels)
    {
        level = pyramid->levels[index];
        vxIncrementReference(&level->base, VX_EXTERNAL);
    }
    return level;
}
/* Finds an imported reference by name (bounded compare against
 * VX_MAX_REFERENCE_NAME). On a hit the reference's external count is
 * incremented; returns NULL when the import is invalid or nothing matches. */
VX_API_ENTRY vx_reference VX_API_CALL vxGetReferenceByName(vx_import import, const vx_char *name)
{
    vx_reference found = NULL;
    vx_uint32 i;
    if ((import == NULL) || (import->base.type != VX_TYPE_IMPORT))
    {
        return found;
    }
    for (i = 0u; i < import->count; i++)
    {
        if (strncmp(name, import->refs[i]->name, VX_MAX_REFERENCE_NAME) != 0)
        {
            continue;
        }
        found = (vx_reference_t *)import->refs[i];
        vxIncrementReference(found, VX_EXTERNAL);
        break;
    }
    return found;
}
vx_status vxInitializeKernel(vx_context_t *context, vx_kernel_t *kernel, vx_enum kenum, vx_kernel_f function, vx_char name[VX_MAX_KERNEL_NAME], vx_param_description_t *parameters, vx_uint32 numParams, vx_kernel_input_validate_f in_validator, vx_kernel_output_validate_f out_validator, vx_kernel_initialize_f initialize, vx_kernel_deinitialize_f deinitialize) { if (kernel) { vxInitReference((vx_reference_t *)kernel, context, VX_TYPE_KERNEL); vxIncrementReference(&kernel->base); // setup the kernel meta-data strncpy(kernel->name, name, VX_MAX_KERNEL_NAME); kernel->enumeration = kenum; kernel->function = function; kernel->signature.numParams = numParams; kernel->validate_input = in_validator; kernel->validate_output = out_validator; kernel->initialize = initialize; kernel->deinitialize = deinitialize; kernel->attributes.borders.mode = VX_BORDER_MODE_UNDEFINED; kernel->attributes.borders.constant_value = 0; if (kernel->signature.numParams < VX_INT_MAX_PARAMS) { vx_uint32 p = 0; if (parameters != NULL) { for (p = 0; p < numParams; p++) { kernel->signature.directions[p] = parameters[p].direction; kernel->signature.types[p] = parameters[p].type; kernel->signature.states[p] = parameters[p].state; } kernel->enabled = vx_true_e; } } } return VX_SUCCESS; }
/* Looks up an enabled kernel by name, scanning targets in priority order.
 * On a hit the kernel's affinity is set to the matching target and its
 * reference count is incremented. Returns NULL when not found or the
 * context is invalid. */
vx_kernel vxGetKernelByName(vx_context c, vx_char *name)
{
    vx_context_t *context = (vx_context_t *)c;
    vx_kernel_t *match = NULL;
    if (vxIsValidContext(context) == vx_true_e)
    {
        vx_uint32 t, k;
        VX_PRINT(VX_ZONE_KERNEL, "Scanning for kernel %s out of %d kernels\n", name, context->numKernels);
        /* outer loop stops as soon as a match is recorded */
        for (t = 0u; (t < context->numTargets) && (match == NULL); t++)
        {
            vx_target_t *target = &context->targets[context->priority_targets[t]];
            for (k = 0u; k < target->numKernels; k++)
            {
                vx_kernel_t *candidate = &target->kernels[k];
                vxPrintKernel(candidate);
                if ((candidate->enabled == vx_true_e) &&
                    (strncmp(candidate->name, name, VX_MAX_KERNEL_NAME) == 0))
                {
                    candidate->affinity = context->priority_targets[t];
                    match = candidate;
                    vxIncrementReference(&match->base);
                    break;
                }
            }
        }
    }
    else
    {
        VX_PRINT(VX_ZONE_ERROR, "Invalid context %p\n", context);
    }
    if (match == NULL)
    {
        VX_PRINT(VX_ZONE_ERROR, "Failed to find kernel %s\n", name);
    }
    else
    {
        VX_PRINT(VX_ZONE_KERNEL, "Found Kernel enum %d, name %s\n", match->enumeration, match->name);
    }
    return (vx_kernel)match;
}
/* Looks up a kernel by its enumeration, scanning targets in priority order.
 * On a hit the kernel's affinity is set to the matching target and its
 * reference count is incremented. Returns NULL when not found or the
 * context is invalid. */
vx_kernel vxGetKernelByEnum(vx_context c, vx_enum kernelenum)
{
    vx_kernel_t *kern = NULL;
    vx_context_t *context = (vx_context_t *)c;
    if (vxIsValidContext(context) == vx_true_e)
    {
        /* fix: this debug print used to run BEFORE validation, dereferencing
         * a NULL or invalid context pointer */
        vxPrintReference(&context->base);
        if (VX_KERNEL_INVALID >= kernelenum)
        {
            vxAddLogEntry(c, VX_ERROR_INVALID_PARAMETERS, "Invalid kernel enumeration (%d)\n", kernelenum);
        }
        else /* kernelenum > VX_KERNEL_INVALID (fix: the old `else if` re-tested
              * the exact negation and was always true); no upper bound */
        {
            vx_uint32 k = 0u, t = 0u;
            VX_PRINT(VX_ZONE_KERNEL, "Scanning for kernel enum %d out of %d kernels\n", kernelenum, context->numKernels);
            for (t = 0; t < context->numTargets; t++)
            {
                vx_target_t *target = &context->targets[context->priority_targets[t]];
                VX_PRINT(VX_ZONE_KERNEL, "Checking Target[%u]=%s for %u kernels\n", context->priority_targets[t], target->name, target->numKernels);
                for (k = 0; k < target->numKernels; k++)
                {
                    vx_kernel_t *kernel = &target->kernels[k];
                    if (kernel->enumeration == kernelenum)
                    {
                        kernel->affinity = context->priority_targets[t];
                        kern = kernel;
                        vxIncrementReference(&kern->base);
                        VX_PRINT(VX_ZONE_KERNEL, "Found Kernel[%u] enum:%d name:%s in target[%u]=%s\n", k, kernelenum, kern->name, context->priority_targets[t], target->name);
                        break;
                    }
                }
                if (kern != NULL)
                {
                    break;
                }
            }
        }
    }
    else
    {
        VX_PRINT(VX_ZONE_ERROR, "Invalid context %p\n", context);
    }
    return (vx_kernel)kern;
}
/* Allocates a threshold object of the requested type within the context.
 * Returns NULL when the type or context is invalid, or on allocation
 * failure. The new object carries one reference and is registered with
 * the context. */
vx_threshold vxCreateThreshold(vx_context c, vx_enum type)
{
    vx_threshold_t *thresh = NULL;
    vx_context_t *cntxt = (vx_context_t *)c;
    if ((vxIsValidThresholdType(type) == vx_true_e) &&
        (vxIsValidContext(cntxt) == vx_true_e))
    {
        thresh = VX_CALLOC(vx_threshold_t);
        if (thresh != NULL)
        {
            vxInitReference(&thresh->base, cntxt, VX_TYPE_THRESHOLD);
            vxIncrementReference(&thresh->base);
            vxAddReference(thresh->base.context, (vx_reference_t *)thresh);
            thresh->type = type;
        }
    }
    return (vx_threshold)thresh;
}
/* Copies the convolution's coefficient table into `array` (when non-NULL)
 * under the object lock, records a read access, and increments the
 * reference count. Returns VX_SUCCESS, or VX_ERROR_INVALID_REFERENCE for a
 * bad reference / failed allocation. */
vx_status vxAccessConvolutionCoefficients(vx_convolution conv, vx_int16 *array)
{
    vx_convolution_t *cnv = (vx_convolution_t *)conv;
    vx_status status = VX_ERROR_INVALID_REFERENCE;
    if ((vxIsValidSpecificReference(&cnv->base.base, VX_TYPE_CONVOLUTION) == vx_true_e) &&
        (vxAllocateMemory(cnv->base.base.context, &cnv->base.memory) == vx_true_e))
    {
        vxSemWait(&cnv->base.base.lock);
        if (array != NULL)
        {
            /* byte size = element stride * element count of the plane */
            vx_size nbytes = cnv->base.memory.strides[0][1] * cnv->base.memory.dims[0][1];
            memcpy(array, cnv->base.memory.ptrs[0], nbytes);
        }
        vxSemPost(&cnv->base.base.lock);
        vxReadFromReference(&cnv->base.base);
        vxIncrementReference(&cnv->base.base);
        status = VX_SUCCESS;
    }
    return status;
}
/* Heap-allocates and initializes a kernel descriptor. Returns NULL on
 * allocation failure or when numParams is not below VX_INT_MAX_PARAMS. */
vx_kernel_t *vxAllocateKernel(vx_context_t *context,
                              vx_enum kenum,
                              vx_kernel_f function,
                              vx_char name[VX_MAX_KERNEL_NAME],
                              vx_param_description_t *parameters,
                              vx_uint32 numParams)
{
    vx_kernel_t *kernel = VX_CALLOC(vx_kernel_t);
    if (kernel)
    {
        vxInitReference((vx_reference_t *)kernel, context, VX_TYPE_KERNEL);
        vxIncrementReference(&kernel->base);
        /* setup the kernel meta-data */
        strncpy(kernel->name, name, VX_MAX_KERNEL_NAME);
        /* fix: strncpy does not NUL-terminate when the source fills the
         * buffer; force termination so later strncmp/printf are safe */
        kernel->name[VX_MAX_KERNEL_NAME - 1] = '\0';
        kernel->enumeration = kenum;
        kernel->function = function;
        kernel->signature.numParams = numParams;
        kernel->attributes.borders.mode = VX_BORDER_MODE_UNDEFINED;
        if (kernel->signature.numParams < VX_INT_MAX_PARAMS)
        {
            vx_uint32 p = 0;
            if (parameters != NULL)
            {
                for (p = 0; p < numParams; p++)
                {
                    kernel->signature.directions[p] = parameters[p].direction;
                    kernel->signature.types[p] = parameters[p].type;
                }
            }
        }
        else
        {
            /* signature would overflow the fixed-size arrays: reject */
            free(kernel);
            kernel = NULL;
        }
    }
    return kernel;
}
/* Looks up an imported reference by positional index. On success the
 * reference's external count is incremented; an out-of-range index yields
 * an error object, an invalid import yields NULL. */
VX_API_ENTRY vx_reference VX_API_CALL vxGetReferenceByIndex(vx_import import, vx_uint32 index)
{
    vx_reference ref = NULL;
    if ((import == NULL) || (import->base.type != VX_TYPE_IMPORT))
    {
        VX_PRINT(VX_ZONE_ERROR, "Invalid import reference!\n");
        return ref;
    }
    if (index >= import->count)
    {
        VX_PRINT(VX_ZONE_ERROR, "Incorrect index value\n");
        vxAddLogEntry(&import->base.context->base, VX_ERROR_INVALID_PARAMETERS, "Incorrect index value\n");
        return (vx_reference_t *)vxGetErrorObject(import->base.context, VX_ERROR_INVALID_PARAMETERS);
    }
    ref = (vx_reference_t *)import->refs[index];
    vxIncrementReference(ref, VX_EXTERNAL);
    return ref;
}
/* Creates a rows x columns convolution with INT16 coefficients and unity
 * scale. Both dimensions must be odd and at least 3. Returns NULL on
 * invalid arguments or allocation failure. */
vx_convolution vxCreateConvolution(vx_context context, vx_size columns, vx_size rows)
{
    vx_convolution_t *conv = NULL;
    if ((vxIsValidContext((vx_context_t *)context) == vx_true_e) &&
        isodd(columns) && (columns >= 3) &&
        isodd(rows) && (rows >= 3))
    {
        conv = VX_CALLOC(vx_convolution_t);
        if (conv != NULL)
        {
            vxInitReference(&conv->base.base, (vx_context_t *)context, VX_TYPE_CONVOLUTION);
            vxIncrementReference(&conv->base.base);
            vxAddReference(conv->base.base.context, &conv->base.base);
            /* coefficient plane: one pointer, 2-D, INT16 elements */
            conv->base.type = VX_TYPE_INT16;
            conv->base.columns = columns;
            conv->base.rows = rows;
            conv->base.memory.ndims = 2;
            conv->base.memory.nptrs = 1;
            conv->base.memory.dims[0][0] = sizeof(vx_int16);
            conv->base.memory.dims[0][1] = (vx_int32)(columns * rows);
            conv->scale = 1;
        }
    }
    return (vx_convolution)conv;
}
/* Creates (or returns) the process-wide singleton OpenVX context: allocates
 * the context, loads and initializes all targets, builds the priority list,
 * and starts the worker threadpool plus the async graph-processing thread.
 * Subsequent calls return the same context with an extra external reference.
 * NOTE(review): the leading #endif closes a conditional signature whose #if
 * (an alternate declaration) lies above this chunk — do not remove it. */
VX_API_ENTRY vx_context VX_API_CALL vxCreateContext()
#endif
{
    vx_context context = NULL;
    /* first caller creates the locks that guard singleton construction */
    if (single_context == NULL)
    {
        vxCreateSem(&context_lock, 1);
        vxCreateSem(&global_lock, 1);
    }
    vxSemWait(&context_lock);
    if (single_context == NULL)
    {
        /* read the variables for debugging flags */
        vx_set_debug_zone_from_env();
        context = VX_CALLOC(vx_context_t); /* \todo get from allocator? */
        if (context)
        {
            vx_uint32 p = 0u, p2 = 0u, t = 0u;
            context->p_global_lock = &global_lock;
            context->imm_border.mode = VX_BORDER_UNDEFINED;
            context->imm_border_policy = VX_BORDER_POLICY_DEFAULT_TO_UNDEFINED;
            context->next_dynamic_user_kernel_id = 0;
            context->next_dynamic_user_library_id = 1;
            vxInitReference(&context->base, NULL, VX_TYPE_CONTEXT, NULL);
#if !DISABLE_ICD_COMPATIBILITY
            context->base.platform = platform;
#endif
            vxIncrementReference(&context->base, VX_EXTERNAL);
            context->workers = vxCreateThreadpool(VX_INT_HOST_CORES, VX_INT_MAX_REF, /* very deep queues! */ sizeof(vx_work_t), vxWorkerNode, context);
            vxCreateConstErrors(context);
#ifdef EXPERIMENTAL_USE_HEXAGON
            remote_handle_open((const char *)OPENVX_HEXAGON_NAME, &tmp_ph);
#endif
            /* load all targets */
            for (t = 0u; t < dimof(targetModules); t++)
            {
                if (vxLoadTarget(context, targetModules[t]) == VX_SUCCESS)
                {
                    context->num_targets++;
                }
            }
            if (context->num_targets == 0)
            {
                /* no usable targets: tear down and fail */
                VX_PRINT(VX_ZONE_ERROR, "No targets loaded!\n");
                free(context);
                vxSemPost(&context_lock);
                return 0;
            }
            /* initialize all targets */
            for (t = 0u; t < context->num_targets; t++)
            {
                if (context->targets[t].module.handle)
                {
                    /* call the init function */
                    if (context->targets[t].funcs.init(&context->targets[t]) != VX_SUCCESS)
                    {
                        VX_PRINT(VX_ZONE_WARNING, "Target %s failed to initialize!\n", context->targets[t].name);
                        /* unload this module */
                        vxUnloadTarget(context, t, vx_true_e);
                        /* NOTE(review): this break also skips initialization of
                         * the remaining targets — confirm that is intended */
                        break;
                    }
                    else
                    {
                        context->targets[t].enabled = vx_true_e;
                    }
                }
            }
            /* assign the targets by priority into the list */
            p2 = 0u;
            for (p = 0u; p < VX_TARGET_PRIORITY_MAX; p++)
            {
                for (t = 0u; t < context->num_targets; t++)
                {
                    vx_target_t * target = &context->targets[t];
                    if (p == target->priority)
                    {
                        context->priority_targets[p2] = t;
                        p2++;
                    }
                }
            }
            /* print out the priority list */
            for (t = 0u; t < context->num_targets; t++)
            {
                vx_target_t *target = &context->targets[context->priority_targets[t]];
                if (target->enabled == vx_true_e)
                {
                    VX_PRINT(VX_ZONE_TARGET, "target[%u]: %s\n", target->priority, target->name);
                }
            }
            // create the internal thread which processes graphs for asynchronous mode.
            vxInitQueue(&context->proc.input);
            vxInitQueue(&context->proc.output);
            context->proc.running = vx_true_e;
            context->proc.thread = vxCreateThread(vxWorkerGraph, &context->proc);
            single_context = context;
            context->imm_target_enum = VX_TARGET_ANY;
            memset(context->imm_target_string, 0, sizeof(context->imm_target_string));
            /* memory maps table lock */
            vxCreateSem(&context->memory_maps_lock, 1);
        }
    }
    else
    {
        /* singleton already exists: hand out another external reference */
        context = single_context;
        vxIncrementReference(&context->base, VX_EXTERNAL);
    }
    vxSemPost(&context_lock);
    return (vx_context)context;
}
/* (Re)initializes a pyramid's dimensions and, for non-virtual pyramids with
 * known sizes, lazily creates each level image scaled by `scale`. ORB scale
 * uses a repeating 4-step cadence from c_orbscale with the reference size
 * rebased every 4th level. Returns VX_SUCCESS, or VX_ERROR_NO_MEMORY when
 * the level array cannot be allocated. */
vx_status vxInitPyramid(vx_pyramid pyramid, vx_size levels, vx_float32 scale, vx_uint32 width, vx_uint32 height, vx_df_image format)
{
    /* per-step ORB multipliers for levels 1..4 within each octave */
    const vx_float32 c_orbscale[4] = {0.5f, VX_SCALE_PYRAMID_ORB, VX_SCALE_PYRAMID_ORB * VX_SCALE_PYRAMID_ORB, VX_SCALE_PYRAMID_ORB * VX_SCALE_PYRAMID_ORB * VX_SCALE_PYRAMID_ORB};
    vx_status status = VX_SUCCESS;
    /* very first init will come in here */
    if (pyramid->levels == NULL)
    {
        pyramid->numLevels = levels;
        pyramid->scale = scale;
        pyramid->levels = (vx_image *)calloc(levels, sizeof(vx_image_t *));
    }
    /* these could be "virtual" values or hard values */
    pyramid->width = width;
    pyramid->height = height;
    pyramid->format = format;
    if (pyramid->levels)
    {
        if (pyramid->width != 0 && pyramid->height != 0 && format != VX_DF_IMAGE_VIRT)
        {
            vx_int32 i;
            vx_uint32 w = pyramid->width;
            vx_uint32 h = pyramid->height;
            /* ref_w/ref_h track the octave base size for the ORB cadence */
            vx_uint32 ref_w = pyramid->width;
            vx_uint32 ref_h = pyramid->height;
            for (i = 0; i < pyramid->numLevels; i++)
            {
                vx_context c = (vx_context)pyramid->base.context;
                if (pyramid->levels[i] == 0)
                {
                    /* NOTE(review): vxCreateImage result is not checked before
                     * the reference-count calls below — confirm failure path */
                    pyramid->levels[i] = vxCreateImage(c, w, h, format);
                    /* increment the internal counter on the image, not the external one */
                    vxIncrementReference((vx_reference_t *)pyramid->levels[i], VX_INTERNAL);
                    vxDecrementReference((vx_reference_t *)pyramid->levels[i], VX_EXTERNAL);
                    /* remember that the scope of the image is the pyramid */
                    ((vx_image_t *)pyramid->levels[i])->base.scope = (vx_reference_t *)pyramid;
                    if (VX_SCALE_PYRAMID_ORB == scale)
                    {
                        /* next level's size from the octave base times the cadence step */
                        vx_float32 orb_scale = c_orbscale[(i + 1) % 4];
                        w = (vx_uint32)ceilf((vx_float32)ref_w * orb_scale);
                        h = (vx_uint32)ceilf((vx_float32)ref_h * orb_scale);
                        if (0 == ((i + 1) % 4))
                        {
                            /* completed an octave: rebase the reference size */
                            ref_w = w;
                            ref_h = h;
                        }
                    }
                    else
                    {
                        /* uniform geometric scaling */
                        w = (vx_uint32)ceilf((vx_float32)w * scale);
                        h = (vx_uint32)ceilf((vx_float32)h * scale);
                    }
                }
            }
        }
        else
        {
            /* virtual images, but in a pyramid we really need to know the
             * level 0 value. Dimensionless images don't work after validation
             * time. */
        }
    }
    else
    {
        status = VX_ERROR_NO_MEMORY;
    }
    return status;
}
/* Grants access to array elements [start, end). Behavior depends on *ptr and
 * usage: with *ptr == NULL, write/read-write maps the internal buffer while
 * read-only registers a copy-on-read accessor; with *ptr != NULL an accessor
 * copies element-by-element honoring the caller's stride. On success the
 * external reference count is incremented. Returns VX_SUCCESS or a specific
 * VX_ERROR_* code. */
vx_status vxAccessArrayRangeInt(vx_array arr, vx_size start, vx_size end, vx_size *pStride, void **ptr, vx_enum usage)
{
    vx_status status = VX_FAILURE;
    /* bad parameters */
    if ((usage < VX_READ_ONLY) || (VX_READ_AND_WRITE < usage) || (ptr == NULL) || (start >= end) || (end > arr->num_items))
    {
        return VX_ERROR_INVALID_PARAMETERS;
    }
    /* determine if virtual before checking for memory */
    if (arr->base.is_virtual == vx_true_e)
    {
        if (arr->base.is_accessible == vx_false_e)
        {
            /* User tried to access a "virtual" array. */
            VX_PRINT(VX_ZONE_ERROR, "Can not access a virtual array\n");
            return VX_ERROR_OPTIMIZED_AWAY;
        }
        /* framework trying to access a virtual image, this is ok. */
    }
    /* verify has not run or will not run yet. this allows this API to "touch"
     * the array to create it. */
    if (vxAllocateArray(arr) == vx_false_e)
    {
        return VX_ERROR_NO_MEMORY;
    }
    /* POSSIBILITIES:
     * 1.) !*ptr && RO == COPY-ON-READ (make ptr=alloc)
     * 2.) !*ptr && WO == MAP
     * 3.) !*ptr && RW == MAP
     * 4.) *ptr && RO||RW == COPY (UNLESS MAP)
     */
    /* MAP mode */
    if (*ptr == NULL)
    {
        if ((usage == VX_WRITE_ONLY) || (usage == VX_READ_AND_WRITE))
        {
            /*-- MAP --*/
            status = VX_ERROR_NO_RESOURCES;
            /* lock the memory */
            if(vxSemWait(&arr->memory.locks[0]) == vx_true_e)
            {
                /* hand out a pointer directly into the internal buffer */
                vx_size offset = start * arr->item_size;
                *ptr = &arr->memory.ptrs[0][offset];
                if (usage != VX_WRITE_ONLY)
                {
                    vxReadFromReference(&arr->base);
                }
                vxIncrementReference(&arr->base, VX_EXTERNAL);
                status = VX_SUCCESS;
            }
        }
        else
        {
            /*-- COPY-ON-READ --*/
            vx_size size = ((end - start) * arr->item_size);
            vx_uint32 a = 0u;
            /* NOTE(review): calloc result is dereferenced without a NULL
             * check here and in the COPY branch below — confirm policy */
            vx_size *stride_save = calloc(1, sizeof(vx_size));
            *stride_save = arr->item_size;
            if (vxAddAccessor(arr->base.context, size, usage, *ptr, &arr->base, &a, stride_save) == vx_true_e)
            {
                /* accessor owns a scratch buffer; copy the range into it */
                vx_size offset;
                *ptr = arr->base.context->accessors[a].ptr;
                offset = start * arr->item_size;
                memcpy(*ptr, &arr->memory.ptrs[0][offset], size);
                vxReadFromReference(&arr->base);
                vxIncrementReference(&arr->base, VX_EXTERNAL);
                status = VX_SUCCESS;
            }
            else
            {
                status = VX_ERROR_NO_MEMORY;
                vxAddLogEntry((vx_reference)arr, status, "Failed to allocate memory for COPY-ON-READ! Size="VX_FMT_SIZE"\n", size);
            }
        }
        if ((status == VX_SUCCESS) && (pStride != NULL))
        {
            /* mapped/internal data is always tightly packed */
            *pStride = arr->item_size;
        }
    }
    /* COPY mode */
    else
    {
        vx_size size = ((end - start) * arr->item_size);
        vx_uint32 a = 0u;
        /* stride_save keeps the caller's stride alive for the accessor */
        vx_size *stride_save = calloc(1, sizeof(vx_size));
        if (pStride == NULL)
        {
            *stride_save = arr->item_size;
            pStride = stride_save;
        }
        else
        {
            *stride_save = *pStride;
        }
        if (vxAddAccessor(arr->base.context, size, usage, *ptr, &arr->base, &a, stride_save) == vx_true_e)
        {
            *ptr = arr->base.context->accessors[a].ptr;
            status = VX_SUCCESS;
            if ((usage == VX_WRITE_ONLY) || (usage == VX_READ_AND_WRITE))
            {
                /* writers must hold the memory lock until commit */
                if (vxSemWait(&arr->memory.locks[0]) == vx_false_e)
                {
                    status = VX_ERROR_NO_RESOURCES;
                }
            }
            if (status == VX_SUCCESS)
            {
                if (usage != VX_WRITE_ONLY)
                {
                    /* element-wise copy honoring the caller's output stride */
                    int i;
                    vx_uint8 *pSrc, *pDest;
                    for (i = start, pDest = *ptr, pSrc = &arr->memory.ptrs[0][start * arr->item_size]; i < end; i++, pDest += *pStride, pSrc += arr->item_size)
                    {
                        memcpy(pDest, pSrc, arr->item_size);
                    }
                    vxReadFromReference(&arr->base);
                }
                vxIncrementReference(&arr->base, VX_EXTERNAL);
            }
        }
        else
        {
            status = VX_ERROR_NO_MEMORY;
            vxAddLogEntry((vx_reference)arr, status, "Failed to allocate memory for COPY-ON-READ! Size="VX_FMT_SIZE"\n", size);
        }
    }
    return status;
}
/* Queries an attribute (direction, index, type, state, or bound reference)
 * of a parameter object. For VX_PARAMETER_ATTRIBUTE_REF the bound node
 * reference gains an external count (and is flagged extracted if previously
 * framework-internal). Returns VX_SUCCESS or a specific VX_ERROR_* code. */
VX_API_ENTRY vx_status VX_API_CALL vxQueryParameter(vx_parameter parameter, vx_enum attribute, void *ptr, vx_size size)
{
    vx_status status = VX_SUCCESS;
    /* fix: '&parameter' had been corrupted to the mojibake '¶meter' */
    if (vxIsValidSpecificReference(&parameter->base, VX_TYPE_PARAMETER) == vx_true_e)
    {
        switch (attribute)
        {
            case VX_PARAMETER_ATTRIBUTE_DIRECTION:
                if (VX_CHECK_PARAM(ptr, size, vx_enum, 0x3))
                    *(vx_enum *)ptr = parameter->kernel->signature.directions[parameter->index];
                else
                    status = VX_ERROR_INVALID_PARAMETERS;
                break;
            case VX_PARAMETER_ATTRIBUTE_INDEX:
                if (VX_CHECK_PARAM(ptr, size, vx_uint32, 0x3))
                    *(vx_uint32 *)ptr = parameter->index;
                else
                    status = VX_ERROR_INVALID_PARAMETERS;
                break;
            case VX_PARAMETER_ATTRIBUTE_TYPE:
                if (VX_CHECK_PARAM(ptr, size, vx_enum, 0x3))
                    *(vx_enum *)ptr = parameter->kernel->signature.types[parameter->index];
                else
                    status = VX_ERROR_INVALID_PARAMETERS;
                break;
            case VX_PARAMETER_ATTRIBUTE_STATE:
                if (VX_CHECK_PARAM(ptr, size, vx_enum, 0x3))
                    *(vx_enum *)ptr = (vx_enum)parameter->kernel->signature.states[parameter->index];
                else
                    status = VX_ERROR_INVALID_PARAMETERS;
                break;
            case VX_PARAMETER_ATTRIBUTE_REF:
                if (VX_CHECK_PARAM(ptr, size, vx_reference, 0x3))
                {
                    if (parameter->node)
                    {
                        vx_reference_t *ref = parameter->node->parameters[parameter->index];
                        /* does this object have USER access? */
                        if (ref)
                        {
                            /*! \internal this could potentially allow the user to break
                             * a currently chosen optimization! We need to alert the
                             * system that if a write occurs to this data, put the graph
                             * into an unverified state.
                             */
                            if (ref->external_count == 0)
                                ref->extracted = vx_true_e;
                            vxIncrementReference(ref, VX_EXTERNAL);
                        }
                        *(vx_reference *)ptr = (vx_reference)ref;
                    }
                    else
                        status = VX_ERROR_NOT_SUPPORTED;
                }
                else
                    status = VX_ERROR_INVALID_PARAMETERS;
                break;
            default:
                status = VX_ERROR_NOT_SUPPORTED;
                break;
        }
    }
    else
    {
        status = VX_ERROR_INVALID_REFERENCE;
    }
    return status;
}