/* Creates a 1D distribution (histogram) of numBins 32-bit bins covering the
 * value interval [offset, offset + range).  Returns an error object on bad
 * parameters; returns NULL only when the context itself is invalid. */
VX_API_ENTRY vx_distribution VX_API_CALL vxCreateDistribution(vx_context context, vx_size numBins, vx_int32 offset, vx_uint32 range)
{
    vx_distribution distribution = NULL;
    if (vxIsValidContext(context) == vx_true_e)
    {
        if ((numBins != 0) && (range != 0))
        {
            distribution = (vx_distribution)vxCreateReference(context, VX_TYPE_DISTRIBUTION, VX_EXTERNAL, &context->base);
            if ( vxGetStatus((vx_reference)distribution) == VX_SUCCESS && distribution->base.type == VX_TYPE_DISTRIBUTION)
            {
                /* backing store: a single plane of 1 x numBins x 1 vx_int32 bins */
                distribution->memory.ndims = 2;
                distribution->memory.nptrs = 1;
                distribution->memory.strides[0][VX_DIM_C] = sizeof(vx_int32);
                distribution->memory.dims[0][VX_DIM_C] = 1;
                distribution->memory.dims[0][VX_DIM_X] = (vx_int32)numBins;
                distribution->memory.dims[0][VX_DIM_Y] = 1;
                distribution->memory.cl_type = CL_MEM_OBJECT_BUFFER;
                /* integer bin width; NOTE(review): if range < numBins this
                 * truncates to 0 -- confirm callers guarantee range >= numBins */
                distribution->window_x = (vx_uint32)range/(vx_uint32)numBins;
                distribution->window_y = 1;
                distribution->offset_x = offset;
                distribution->offset_y = 0;
            }
        }
        else
        {
            VX_PRINT(VX_ZONE_ERROR, "Invalid parameters to distribution\n");
            vxAddLogEntry(&context->base, VX_ERROR_INVALID_PARAMETERS, "Invalid parameters to distribution\n");
            distribution = (vx_distribution)vxGetErrorObject(context, VX_ERROR_INVALID_PARAMETERS);
        }
    }
    return distribution;
}
/* Hands out the next dynamic user-kernel library id from the context,
 * or VX_ERROR_NO_RESOURCES once the id space is exhausted. */
VX_API_ENTRY vx_status VX_API_CALL vxAllocateUserKernelLibraryId(vx_context context, vx_enum * pLibraryId)
{
    if ((vxIsValidContext(context) != vx_true_e) || (pLibraryId == NULL))
    {
        return VX_ERROR_INVALID_REFERENCE;
    }
    if (context->next_dynamic_user_library_id > VX_LIBRARY(VX_LIBRARY_MASK))
    {
        /* all dynamic library ids have been allocated */
        return VX_ERROR_NO_RESOURCES;
    }
    *pLibraryId = context->next_dynamic_user_library_id;
    context->next_dynamic_user_library_id += 1;
    return VX_SUCCESS;
}
static vx_pyramid vxCreatePyramidInt(vx_context context, vx_size levels, vx_float32 scale, vx_uint32 width, vx_uint32 height, vx_df_image format, vx_bool is_virtual) { vx_pyramid pyramid = NULL; if (vxIsValidContext(context) == vx_false_e) return NULL; if ((scale != VX_SCALE_PYRAMID_HALF) && (scale != VX_SCALE_PYRAMID_ORB)) { VX_PRINT(VX_ZONE_ERROR, "Invalid scale %lf for pyramid!\n",scale); vxAddLogEntry((vx_reference)context, VX_ERROR_INVALID_PARAMETERS, "Invalid scale %lf for pyramid!\n",scale); pyramid = (vx_pyramid_t *)vxGetErrorObject(context, VX_ERROR_INVALID_PARAMETERS); } else if (levels == 0 || levels > 8) { VX_PRINT(VX_ZONE_ERROR, "Invalid number of levels for pyramid!\n", levels); vxAddLogEntry((vx_reference)context, VX_ERROR_INVALID_PARAMETERS, "Invalid number of levels for pyramid!\n", levels); pyramid = (vx_pyramid_t *)vxGetErrorObject(context, VX_ERROR_INVALID_PARAMETERS); } else { pyramid = (vx_pyramid)vxCreateReference(context, VX_TYPE_PYRAMID, VX_EXTERNAL, &context->base); if (pyramid && pyramid->base.type == VX_TYPE_PYRAMID) { vx_status status; pyramid->base.is_virtual = is_virtual; status = vxInitPyramid(pyramid, levels, scale, width, height, format); if (status != VX_SUCCESS) { vxAddLogEntry((vx_reference)pyramid, status, "Failed to initialize pyramid\n"); vxReleasePyramid((vx_pyramid *)&pyramid); pyramid = (vx_pyramid_t *)vxGetErrorObject(context, status); } } else { VX_PRINT(VX_ZONE_ERROR, "Failed to allocate memory\n"); vxAddLogEntry((vx_reference)context, VX_ERROR_NO_MEMORY, "Failed to allocate memory\n"); pyramid = (vx_pyramid_t *)vxGetErrorObject(context, VX_ERROR_NO_MEMORY); } } return pyramid; }
/* Hands out the next dynamic user-kernel enum in the VX_ID_USER space,
 * or VX_ERROR_NO_RESOURCES once the id space is exhausted. */
VX_API_ENTRY vx_status VX_API_CALL vxAllocateUserKernelId(vx_context context, vx_enum * pKernelEnumId)
{
    if ((vxIsValidContext(context) != vx_true_e) || (pKernelEnumId == NULL))
    {
        return VX_ERROR_INVALID_REFERENCE;
    }
    if (context->next_dynamic_user_kernel_id > VX_KERNEL_MASK)
    {
        /* all dynamic kernel ids have been allocated */
        return VX_ERROR_NO_RESOURCES;
    }
    *pKernelEnumId = VX_KERNEL_BASE(VX_ID_USER,0) + context->next_dynamic_user_kernel_id;
    context->next_dynamic_user_kernel_id += 1;
    return VX_SUCCESS;
}
/* Sets a writable context attribute (immediate-mode border configuration).
 * Returns VX_ERROR_INVALID_REFERENCE for a bad context,
 * VX_ERROR_INVALID_PARAMETERS for a mis-sized/mis-aligned pointer,
 * VX_ERROR_INVALID_VALUE for an out-of-range value. */
VX_API_ENTRY vx_status VX_API_CALL vxSetContextAttribute(vx_context context, vx_enum attribute, const void *ptr, vx_size size)
{
    vx_status status = VX_SUCCESS;
    if (vxIsValidContext(context) == vx_false_e)
    {
        return VX_ERROR_INVALID_REFERENCE;
    }
    switch (attribute)
    {
        case VX_CONTEXT_IMMEDIATE_BORDER:
            if (!VX_CHECK_PARAM(ptr, size, vx_border_t, 0x3))
            {
                status = VX_ERROR_INVALID_PARAMETERS;
            }
            else
            {
                const vx_border_t *border = (const vx_border_t *)ptr;
                if (vxIsValidBorderMode(border->mode) == vx_false_e)
                    status = VX_ERROR_INVALID_VALUE;
                else
                    context->imm_border = *border;
            }
            break;
        case VX_CONTEXT_IMMEDIATE_BORDER_POLICY:
            if (!VX_CHECK_PARAM(ptr, size, vx_enum, 0x3))
            {
                status = VX_ERROR_INVALID_PARAMETERS;
            }
            else
            {
                vx_enum policy = *(const vx_enum *)ptr;
                if (vxIsValidBorderModePolicy(policy) == vx_false_e)
                    status = VX_ERROR_INVALID_VALUE;
                else
                    context->imm_border_policy = policy;
            }
            break;
        default:
            status = VX_ERROR_NOT_SUPPORTED;
            break;
    }
    return status;
}
/* Creates a meta-format object used during output parameter validation.
 * The type starts as VX_TYPE_INVALID until a validator fills it in. */
vx_meta_format vxCreateMetaFormat(vx_context context)
{
    vx_meta_format meta = NULL;
    if (vxIsValidContext(context) != vx_true_e)
    {
        return NULL;
    }
    meta = (vx_meta_format)vxCreateReference(context, VX_TYPE_META_FORMAT, VX_EXTERNAL, &context->base);
    if (vxGetStatus((vx_reference)meta) == VX_SUCCESS)
    {
        meta->size = sizeof(vx_meta_format_t);
        meta->type = VX_TYPE_INVALID;
    }
    return meta;
}
/* Creates a look-up table of 'count' entries.  U8 LUTs are always allowed
 * (restricted to exactly 256 entries under OPENVX_STRICT_1_0); U16 LUTs are
 * allowed only outside strict-1.0 builds.  A LUT is stored internally as a
 * fixed-capacity array of the element type. */
VX_API_ENTRY vx_lut VX_API_CALL vxCreateLUT(vx_context context, vx_enum data_type, vx_size count)
{
    vx_lut_t *lut = NULL;
    if (vxIsValidContext(context) == vx_true_e)
    {
        if (data_type == VX_TYPE_UINT8)
        {
#if defined(OPENVX_STRICT_1_0)
            /* OpenVX 1.0 strict mode: U8 LUTs must have exactly 256 entries */
            if (count != 256)
            {
                VX_PRINT(VX_ZONE_ERROR, "Invalid parameter to LUT\n");
                vxAddLogEntry(&context->base, VX_ERROR_INVALID_PARAMETERS, "Invalid parameter to LUT\n");
                lut = (vx_lut_t *)vxGetErrorObject(context, VX_ERROR_INVALID_PARAMETERS);
            }
            else
#endif
            {
                lut = (vx_lut_t *)vxCreateArrayInt(context, VX_TYPE_UINT8, count, vx_false_e, VX_TYPE_LUT);
                if (vxGetStatus((vx_reference)lut) == VX_SUCCESS && lut->base.type == VX_TYPE_LUT)
                {
                    /* a LUT is born "full": num_items equals its capacity */
                    lut->num_items = count;
                    vxPrintArray(lut);
                }
            }
        }
#if !defined(OPENVX_STRICT_1_0)
        else if (data_type == VX_TYPE_UINT16)
        {
            lut = (vx_lut_t *)vxCreateArrayInt(context, VX_TYPE_UINT16, count, vx_false_e, VX_TYPE_LUT);
            if (vxGetStatus((vx_reference)lut) == VX_SUCCESS && lut->base.type == VX_TYPE_LUT)
            {
                lut->num_items = count;
                vxPrintArray(lut);
            }
        }
#endif
        else
        {
            VX_PRINT(VX_ZONE_ERROR, "Invalid data type\n");
            vxAddLogEntry(&context->base, VX_ERROR_INVALID_TYPE, "Invalid data type\n");
            lut = (vx_lut_t *)vxGetErrorObject(context, VX_ERROR_INVALID_TYPE);
        }
    }
    return (vx_lut)lut;
}
/* Resolves the context that owns 'reference'.  The argument may be either
 * an ordinary reference (whose parent context is returned) or the context
 * itself (returned as-is).  Returns NULL for anything else. */
VX_API_ENTRY vx_context VX_API_CALL vxGetContext(vx_reference reference)
{
    if (vxIsValidReference(reference) == vx_true_e)
    {
        return reference->context;
    }
    if (vxIsValidContext((vx_context)reference) == vx_true_e)
    {
        return (vx_context)reference;
    }
    VX_PRINT(VX_ZONE_ERROR, "%p is not a valid reference\n", reference);
    VX_BACKTRACE(VX_ZONE_ERROR);
    return NULL;
}
/* Looks up an enabled kernel by its fully-qualified name, scanning targets
 * in priority order.  On a hit the kernel's reference count is incremented
 * (caller must release) and its affinity is pinned to the matching target.
 * Returns NULL when no enabled kernel with that name exists. */
vx_kernel vxGetKernelByName(vx_context c, vx_char *name)
{
    vx_kernel_t *kern = NULL;
    vx_context_t *context = (vx_context_t *)c;
    if (vxIsValidContext(context) == vx_true_e)
    {
        vx_uint32 k = 0u, t = 0u;
        VX_PRINT(VX_ZONE_KERNEL, "Scanning for kernel %s out of %d kernels\n", name, context->numKernels);
        for (t = 0; t < context->numTargets; t++)
        {
            /* highest-priority targets are searched first */
            vx_target_t *target = &context->targets[context->priority_targets[t]];
            for (k = 0; k < target->numKernels; k++)
            {
                vx_kernel_t *kernel = &target->kernels[k];
                vxPrintKernel(kernel);
                if ((kernel->enabled == vx_true_e) &&
                    (strncmp(kernel->name, name, VX_MAX_KERNEL_NAME) == 0))
                {
                    /* pin the kernel to the first (best) target that has it */
                    kernel->affinity = context->priority_targets[t];
                    kern = kernel;
                    vxIncrementReference(&kern->base);
                    break;
                }
                kernel = NULL;
            }
            if (kern != NULL)
                break;
        }
    }
    else
    {
        VX_PRINT(VX_ZONE_ERROR, "Invalid context %p\n", context);
    }
    if (kern == NULL)
    {
        VX_PRINT(VX_ZONE_ERROR, "Failed to find kernel %s\n", name);
    }
    else
    {
        VX_PRINT(VX_ZONE_KERNEL,"Found Kernel enum %d, name %s\n", kern->enumeration, kern->name);
    }
    return (vx_kernel)kern;
}
/* Internal constructor for an import object holding 'count' reference slots.
 * BUGFIX: the calloc of the reference table was unchecked; on allocation
 * failure the function used to return an import whose 'refs' table was NULL.
 * It now releases the half-built object and returns an error object. */
vx_import vxCreateImportInt(vx_context context, vx_enum type, vx_uint32 count)
{
    vx_import import = NULL;
    if (vxIsValidContext(context) == vx_false_e)
        return 0;
    import = (vx_import)vxCreateReference(context, VX_TYPE_IMPORT, VX_EXTERNAL, &context->base);
    if (import && import->base.type == VX_TYPE_IMPORT)
    {
        import->refs = (vx_reference *)calloc(count, sizeof(vx_reference));
        if (import->refs == NULL)
        {
            VX_PRINT(VX_ZONE_ERROR, "Failed to allocate memory\n");
            vxAddLogEntry((vx_reference)context, VX_ERROR_NO_MEMORY, "Failed to allocate memory\n");
            vxReleaseReferenceInt((vx_reference_t **)&import, VX_TYPE_IMPORT, VX_EXTERNAL, NULL);
            import = (vx_import)vxGetErrorObject(context, VX_ERROR_NO_MEMORY);
        }
        else
        {
            import->type = type;
            import->count = count;
            VX_PRINT(VX_ZONE_INFO, "Creating Import of %u objects of type %x!\n", count, type);
        }
    }
    return import;
}
/* Looks up a kernel by its enum, scanning targets in priority order.  On a
 * hit the kernel's reference count is incremented (caller must release) and
 * its affinity is pinned to the matching target.
 * BUGFIX: vxPrintReference(&context->base) was called BEFORE the context was
 * validated, dereferencing a NULL/invalid context; it now runs only after
 * vxIsValidContext succeeds. */
vx_kernel vxGetKernelByEnum(vx_context c, vx_enum kernelenum)
{
    vx_kernel_t *kern = NULL;
    vx_context_t *context = (vx_context_t *)c;
    if (vxIsValidContext(context) == vx_true_e)
    {
        vxPrintReference(&context->base);
        if (VX_KERNEL_INVALID >= kernelenum)
        {
            vxAddLogEntry(c, VX_ERROR_INVALID_PARAMETERS, "Invalid kernel enumeration (%d)\n", kernelenum);
        }
        else if (kernelenum > VX_KERNEL_INVALID) // no upper bound for kernel enum
        {
            vx_uint32 k = 0u, t = 0u;
            VX_PRINT(VX_ZONE_KERNEL,"Scanning for kernel enum %d out of %d kernels\n", kernelenum, context->numKernels);
            for (t = 0; t < context->numTargets; t++)
            {
                /* highest-priority targets are searched first */
                vx_target_t *target = &context->targets[context->priority_targets[t]];
                VX_PRINT(VX_ZONE_KERNEL, "Checking Target[%u]=%s for %u kernels\n", context->priority_targets[t], target->name, target->numKernels);
                for (k = 0; k < target->numKernels; k++)
                {
                    vx_kernel_t *kernel = &target->kernels[k];
                    if (kernel->enumeration == kernelenum)
                    {
                        /* pin the kernel to the first (best) target that has it */
                        kernel->affinity = context->priority_targets[t];
                        kern = kernel;
                        vxIncrementReference(&kern->base);
                        VX_PRINT(VX_ZONE_KERNEL,"Found Kernel[%u] enum:%d name:%s in target[%u]=%s\n", k, kernelenum, kern->name, context->priority_targets[t], target->name);
                        break;
                    }
                    kernel = NULL;
                }
                if (kern != NULL)
                    break;
            }
        }
    }
    else
    {
        VX_PRINT(VX_ZONE_ERROR, "Invalid context %p\n", context);
    }
    return (vx_kernel)kern;
}
/* Applies a hint to a reference or context.  The sample implementation
 * currently recognizes no hints, so every valid reference yields
 * VX_ERROR_NOT_SUPPORTED. */
VX_API_ENTRY vx_status VX_API_CALL vxHint(vx_reference reference, vx_enum hint, const void* data, vx_size data_size)
{
    /* reference param should be a valid OpenVX reference*/
    if (vxIsValidContext((vx_context)reference) == vx_false_e &&
        vxIsValidReference(reference) == vx_false_e)
    {
        return VX_ERROR_INVALID_REFERENCE;
    }
    switch (hint)
    {
        /*! \todo add hints to the sample implementation */
        default:
            return VX_ERROR_NOT_SUPPORTED;
    }
}
/* Creates a threshold object of the given comparison type.
 * Returns NULL for an invalid type, invalid context, or allocation failure. */
vx_threshold vxCreateThreshold(vx_context c, vx_enum type)
{
    vx_context_t *context = (vx_context_t *)c;
    vx_threshold_t *thresh = NULL;
    if (vxIsValidThresholdType(type) != vx_true_e)
    {
        return (vx_threshold)NULL;
    }
    if (vxIsValidContext(context) != vx_true_e)
    {
        return (vx_threshold)NULL;
    }
    thresh = VX_CALLOC(vx_threshold_t);
    if (thresh != NULL)
    {
        vxInitReference(&thresh->base, context, VX_TYPE_THRESHOLD);
        vxIncrementReference(&thresh->base);
        vxAddReference(thresh->base.context, (vx_reference_t *)thresh);
        thresh->type = type;
    }
    return (vx_threshold)thresh;
}
/* Registers a user-defined struct of 'size' bytes in the first free slot
 * and returns the assigned type enum, or VX_TYPE_INVALID when the context
 * is bad, size is zero, or no slot remains. */
VX_API_ENTRY vx_enum VX_API_CALL vxRegisterUserStruct(vx_context context, vx_size size)
{
    vx_enum assigned = VX_TYPE_INVALID;
    if ((vxIsValidContext(context) == vx_true_e) && (size != 0))
    {
        vx_uint32 slot = 0;
        while (slot < VX_INT_MAX_USER_STRUCTS)
        {
            if (context->user_structs[slot].type == VX_TYPE_INVALID)
            {
                context->user_structs[slot].type = VX_TYPE_USER_STRUCT_START + slot;
                context->user_structs[slot].size = size;
                assigned = context->user_structs[slot].type;
                break;
            }
            ++slot;
        }
    }
    return assigned;
}
/* Applies a context-wide directive (currently log enable/disable) on the
 * context owning 'reference'. */
VX_API_ENTRY vx_status VX_API_CALL vxDirective(vx_reference reference, vx_enum directive)
{
    vx_context context = NULL;
    if (vxIsValidReference(reference) == vx_false_e)
        return VX_ERROR_INVALID_REFERENCE;
    context = reference->context;
    if (vxIsValidContext(context) == vx_false_e)
        return VX_ERROR_INVALID_REFERENCE;
    if (directive == VX_DIRECTIVE_DISABLE_LOGGING)
    {
        context->log_enabled = vx_false_e;
        return VX_SUCCESS;
    }
    if (directive == VX_DIRECTIVE_ENABLE_LOGGING)
    {
        context->log_enabled = vx_true_e;
        return VX_SUCCESS;
    }
    return VX_ERROR_NOT_SUPPORTED;
}
/* Queries the generic attributes (external count, type) of any reference;
 * the context itself is also accepted. */
vx_status vxQueryReference(vx_reference r, vx_enum attribute, void *ptr, vx_size size)
{
    vx_status status = VX_SUCCESS;
    vx_reference_t *ref = (vx_reference_t *)r;
    /* if it is not a reference and not a context */
    if ((vxIsValidReference(ref) == vx_false_e) &&
        (vxIsValidContext((vx_context_t *)ref) == vx_false_e))
    {
        return VX_ERROR_INVALID_REFERENCE;
    }
    switch (attribute)
    {
        case VX_REF_ATTRIBUTE_COUNT:
            if (VX_CHECK_PARAM(ptr, size, vx_uint32, 0x3))
                *(vx_uint32 *)ptr = ref->external_count;
            else
                status = VX_ERROR_INVALID_PARAMETERS;
            break;
        case VX_REF_ATTRIBUTE_TYPE:
            if (VX_CHECK_PARAM(ptr, size, vx_enum, 0x3))
                *(vx_enum *)ptr = ref->type;
            else
                status = VX_ERROR_INVALID_PARAMETERS;
            break;
        default:
            status = VX_ERROR_NOT_SUPPORTED;
            break;
    }
    return status;
}
/* Validity test for an ordinary (non-context) reference: the magic marker
 * must be intact, the type must be a known non-context type, and the owning
 * context must itself be valid. */
vx_bool vxIsValidReference(vx_reference_t * ref)
{
    if (ref == NULL)
    {
        VX_PRINT(VX_ZONE_ERROR, "Reference was NULL\n");
        return vx_false_e;
    }
    vxPrintReference(ref);
    if ((ref->magic == VX_MAGIC) &&
        (vxIsValidType(ref->type) && ref->type != VX_TYPE_CONTEXT) &&
        (vxIsValidContext(ref->context) == vx_true_e))
    {
        return vx_true_e;
    }
    VX_PRINT(VX_ZONE_ERROR, "%p is not a valid reference!\n", ref);
    return vx_false_e;
}
/* Validity test for a reference that must be of exactly 'type': magic
 * marker intact, type matches, and owning context valid.  A NULL reference
 * only warns (callers often probe optional parameters). */
vx_bool vxIsValidSpecificReference(vx_reference_t * ref, vx_enum type)
{
    if (ref == NULL)
    {
        VX_PRINT(VX_ZONE_WARNING, "Reference was NULL\n");
        return vx_false_e;
    }
    vxPrintReference(ref);
    if ((ref->magic == VX_MAGIC) &&
        (ref->type == type) &&
        (vxIsValidContext(ref->context) == vx_true_e))
    {
        return vx_true_e;
    }
    VX_PRINT(VX_ZONE_ERROR, "%p is not a valid reference!\n", ref);
    return vx_false_e;
}
/* Selects the target on which immediate-mode (vxu) functions execute:
 * VX_TARGET_ANY clears the selection; VX_TARGET_STRING pins it to a named
 * target if that target exists. */
VX_API_ENTRY vx_status VX_API_CALL vxSetImmediateModeTarget(vx_context context, vx_enum target_enum, const char* target_string)
{
    vx_status status = VX_ERROR_INVALID_REFERENCE;
    if (vxIsValidContext(context) == vx_true_e)
    {
        if (target_enum == VX_TARGET_ANY)
        {
            context->imm_target_enum = VX_TARGET_ANY;
            memset(context->imm_target_string, 0, sizeof(context->imm_target_string));
            status = VX_SUCCESS;
        }
        else if (target_enum == VX_TARGET_STRING)
        {
            vx_target_t* target = findTargetByString(context, target_string);
            if (target != NULL) /* target was found */
            {
                context->imm_target_enum = VX_TARGET_STRING;
                strncpy(context->imm_target_string, target_string, sizeof(context->imm_target_string));
                /* guarantee NUL-termination after strncpy */
                context->imm_target_string[sizeof(context->imm_target_string) - 1] = '\0';
                status = VX_SUCCESS;
            }
            else /* target was not found */
            {
                status = VX_ERROR_NOT_SUPPORTED;
            }
        }
        else
        {
            status = VX_ERROR_NOT_SUPPORTED;
        }
    }
    return status;
}
/* Creates an external array of 'capacity' items of 'item_type'.  Returns an
 * error object for bad parameters or allocation failure, NULL only when the
 * context itself is invalid. */
VX_API_ENTRY vx_array VX_API_CALL vxCreateArray(vx_context context, vx_enum item_type, vx_size capacity)
{
    vx_array arr = NULL;
    if (vxIsValidContext(context) != vx_true_e)
    {
        return NULL;
    }
    if ((vxIsValidArrayItemType(context, item_type) != vx_true_e) || (capacity == 0))
    {
        return (vx_array)vxGetErrorObject(context, VX_ERROR_INVALID_PARAMETERS);
    }
    arr = (vx_array)vxCreateArrayInt(context, item_type, capacity, vx_false_e, VX_TYPE_ARRAY);
    if (arr == NULL)
    {
        arr = (vx_array)vxGetErrorObject(context, VX_ERROR_NO_MEMORY);
    }
    return arr;
}
/* Creates a columns x rows convolution of vx_int16 coefficients with an
 * initial scale of 1.  Both dimensions must be odd and at least 3.
 * Returns NULL on invalid context, invalid dimensions, or allocation
 * failure. */
vx_convolution vxCreateConvolution(vx_context context, vx_size columns, vx_size rows)
{
    vx_convolution_t *convolution = NULL;
    if (vxIsValidContext((vx_context_t *)context) == vx_true_e &&
        isodd(columns) && columns >= 3 &&
        isodd(rows) && rows >= 3)
    {
        convolution = VX_CALLOC(vx_convolution_t);
        if (convolution)
        {
            vxInitReference(&convolution->base.base, (vx_context_t *)context, VX_TYPE_CONVOLUTION);
            vxIncrementReference(&convolution->base.base);
            vxAddReference(convolution->base.base.context, &convolution->base.base);
            /* coefficients are stored as signed 16-bit values */
            convolution->base.type = VX_TYPE_INT16;
            convolution->base.columns = columns;
            convolution->base.rows = rows;
            /* NOTE(review): dims[0][0] here holds the element size while
             * vxCreateDistribution uses strides[] for that -- confirm this
             * layout is what the memory allocator expects. */
            convolution->base.memory.ndims = 2;
            convolution->base.memory.nptrs = 1;
            convolution->base.memory.dims[0][0] = sizeof(vx_int16);
            convolution->base.memory.dims[0][1] = (vx_int32)(columns*rows);
            convolution->scale = 1;
        }
    }
    return (vx_convolution)convolution;
}
/* Creates a scalar of the given primitive type and seeds its value from
 * *ptr.  Returns an error object for a non-scalar type, 0 for an invalid
 * context. */
VX_API_ENTRY vx_scalar VX_API_CALL vxCreateScalar(vx_context context, vx_enum data_type, void *ptr)
{
    vx_scalar scalar = NULL;
    if (vxIsValidContext(context) == vx_false_e)
    {
        return 0;
    }
    if (!VX_TYPE_IS_SCALAR(data_type))
    {
        VX_PRINT(VX_ZONE_ERROR, "Invalid type to scalar\n");
        vxAddLogEntry(&context->base, VX_ERROR_INVALID_TYPE, "Invalid type to scalar\n");
        return (vx_scalar)vxGetErrorObject(context, VX_ERROR_INVALID_TYPE);
    }
    scalar = (vx_scalar)vxCreateReference(context, VX_TYPE_SCALAR, VX_EXTERNAL, &context->base);
    if (scalar && scalar->base.type == VX_TYPE_SCALAR)
    {
        scalar->data_type = data_type;
        vxCommitScalarValue(scalar, ptr);
    }
    return (vx_scalar)scalar;
}
/* Loads the kernel extension module derived from 'name' (or the default
 * "openvx-ext" when name is NULL), resolves its vxPublishKernels entry
 * point, and records it in the first free module slot.
 * BUGFIXES: the module path was built with an unbounded sprintf (overflow
 * for long names), and the stored module name used strncpy on a possibly
 * NULL 'name' (undefined behavior) without guaranteed NUL-termination. */
vx_status vxLoadKernels(vx_context c, vx_char *name)
{
    vx_context_t *context = (vx_context_t *)c;
    vx_status status = VX_FAILURE;
    vx_char module[VX_INT_MAX_PATH];
    vx_uint32 m = 0;
    vx_publish_kernels_f publish = NULL;
    /* single fallback used for both the module path and the recorded name */
    const vx_char *basename = (name ? name : "openvx-ext");

    snprintf(module, sizeof(module), VX_MODULE_NAME("%s"), basename);
    if (vxIsValidContext(context) == vx_false_e)
    {
        VX_PRINT(VX_ZONE_ERROR, "Context is invalid!\n");
        return VX_ERROR_INVALID_REFERENCE;
    }
    for (m = 0; m < VX_INT_MAX_MODULES; m++)
    {
        if (context->modules[m].handle == NULL)
        {
            context->modules[m].handle = vxLoadModule(module);
            if (context->modules[m].handle)
            {
                vx_symbol_t sym = vxGetSymbol(context->modules[m].handle, "vxPublishKernels");
                publish = (vx_publish_kernels_f)sym;
                if (publish == NULL)
                {
                    VX_PRINT(VX_ZONE_ERROR, "Failed to load symbol vxPublishKernels\n");
                    status = VX_ERROR_INVALID_MODULE;
                    vxUnloadModule(context->modules[m].handle);
                    context->modules[m].handle = NULL;
                }
                else
                {
                    VX_PRINT(VX_ZONE_INFO, "Calling %s publish function\n", module);
                    status = publish((vx_context)context);
                    if (status != VX_SUCCESS)
                    {
                        VX_PRINT(VX_ZONE_ERROR, "Failed to publish kernels in module\n");
                        vxUnloadModule(context->modules[m].handle);
                        context->modules[m].handle = NULL;
                    }
                    else
                    {
                        /* bounded, always-terminated copy of the module name */
                        snprintf(context->modules[m].name, VX_INT_MAX_PATH, "%s", basename);
                        context->numMods++;
                    }
                }
            }
            else
            {
                VX_PRINT(VX_ZONE_ERROR, "Failed to find module %s in libraries path\n", module);
            }
            break; /* only the first free slot is ever tried */
        }
        else
        {
            VX_PRINT(VX_ZONE_CONTEXT, "module[%u] is used\n", m);
        }
    }
    if (status != VX_SUCCESS)
    {
        VX_PRINT(VX_ZONE_ERROR, "Failed to load module %s; error %d\n", module, status);
    }
    else
    {
        for (m = 0; m < context->numMods; m++)
        {
            VX_PRINT(VX_ZONE_INFO, "Module: %s\n", context->modules[m].name);
        }
    }
    return status;
}
/* Releases the user's hold on the context.  When the last external hold is
 * dropped this performs full teardown: worker/processor shutdown, garbage
 * collection of every outstanding reference, module unload, target
 * de-initialization, accessor/map cleanup, and finally freeing the context
 * memory itself.  The teardown ORDER below is load-bearing. */
VX_API_ENTRY vx_status VX_API_CALL vxReleaseContext(vx_context *c)
{
    vx_status status = VX_SUCCESS;
    vx_context context = (c?*c:0);
    vx_uint32 r,m,a;
    vx_uint32 t;
    /* the caller's pointer is cleared regardless of outcome */
    if (c) *c = 0;
    vxSemWait(&context_lock);
    if (vxIsValidContext(context) == vx_true_e)
    {
        if (vxDecrementReference(&context->base, VX_EXTERNAL) == 0)
        {
            /* stop the workers and the immediate-mode processor thread first */
            vxDestroyThreadpool(&context->workers);
            context->proc.running = vx_false_e;
            vxPopQueue(&context->proc.input);
            vxJoinThread(context->proc.thread, NULL);
            vxDeinitQueue(&context->proc.output);
            vxDeinitQueue(&context->proc.input);

            /* Deregister any log callbacks if there is any registered */
            vxRegisterLogCallback(context, NULL, vx_false_e);

            /*! \internal Garbage Collect All References */
            /* Details:
             *   1. This loop will warn of references which have not been released by the user.
             *   2. It will close all internally opened error references.
             *   3. It will close the external references, which in turn will internally
             *      close any internally dependent references that they reference, assuming the
             *      reference counting has been done properly in the framework.
             *   4. This garbage collection must be done before the targets are released since some of
             *      these external references may have internal references to target kernels.
             */
            for (r = 0; r < VX_INT_MAX_REF; r++)
            {
                vx_reference_t *ref = context->reftable[r];
                /* Warnings should only come when users have not released all external references */
                if (ref && ref->external_count > 0)
                {
                    VX_PRINT(VX_ZONE_WARNING,"Stale reference "VX_FMT_REF" of type %08x at external count %u, internal count %u\n", ref, ref->type, ref->external_count, ref->internal_count);
                }
                /* These were internally opened during creation, so should internally close ERRORs */
                if(ref && ref->type == VX_TYPE_ERROR)
                {
                    vxReleaseReferenceInt(&ref, ref->type, VX_INTERNAL, NULL);
                }
                /* Warning above so user can fix release external objects, but close here anyway */
                while (ref && ref->external_count > 1)
                {
                    vxDecrementReference(ref, VX_EXTERNAL);
                }
                if (ref && ref->external_count > 0)
                {
                    vxReleaseReferenceInt(&ref, ref->type, VX_EXTERNAL, NULL);
                }
            }

            /* unload every loaded extension module.
             * NOTE(review): this counts with 'num_modules' while vxLoadKernels
             * increments 'numMods' -- confirm both fields track the same set */
            for (m = 0; m < context->num_modules; m++)
            {
                if (context->modules[m].handle)
                {
                    vxUnloadModule(context->modules[m].handle);
                    memset(context->modules[m].name, 0, sizeof(context->modules[m].name));
                    context->modules[m].handle = VX_MODULE_INIT;
                }
            }

            /* de-initialize and unload each target */
            for (t = 0u; t < context->num_targets; t++)
            {
                if (context->targets[t].enabled == vx_true_e)
                {
                    context->targets[t].funcs.deinit(&context->targets[t]);
                    vxUnloadTarget(context, t, vx_true_e);
                    context->targets[t].enabled = vx_false_e;
                }
            }

            /* Remove all outstanding accessors. */
            for (a = 0; a < dimof(context->accessors); ++a)
                if (context->accessors[a].used)
                    vxRemoveAccessor(context, a);

            /* Check for outstanding mappings */
            for (a = 0; a < dimof(context->memory_maps); ++a)
            {
                if (context->memory_maps[a].used)
                {
                    VX_PRINT(VX_ZONE_ERROR, "Memory map %d not unmapped\n", a);
                    vxMemoryUnmap(context, a);
                }
            }
            vxDestroySem(&context->memory_maps_lock);

            /* By now, all external and internal references should be removed */
            for (r = 0; r < VX_INT_MAX_REF; r++)
            {
                if(context->reftable[r])
                    VX_PRINT(VX_ZONE_ERROR,"Reference %d not removed\n", r);
            }

#ifdef EXPERIMENTAL_USE_HEXAGON
            remote_handle_close(tmp_ph);
#endif

            /*! \internal wipe away the context memory first */
            /* Normally destroy sem is part of release reference, but can't for context */
            vxDestroySem(&((vx_reference )context)->lock);
            memset(context, 0, sizeof(vx_context_t));
            free((void *)context);
            vxDestroySem(&global_lock);
            vxSemPost(&context_lock);
            vxDestroySem(&context_lock);
            single_context = NULL;
            return status;
        }
        else
        {
            VX_PRINT(VX_ZONE_WARNING, "Context still has %u holders\n", vxTotalReferenceCount(&context->base));
        }
    }
    else
    {
        status = VX_ERROR_INVALID_REFERENCE;
    }
    vxSemPost(&context_lock);
    return status;
}
vx_kernel vxAddTilingKernel(vx_context c, vx_char name[VX_MAX_KERNEL_NAME], vx_enum enumeration, vx_tiling_kernel_f func_ptr, vx_uint32 num_params, vx_kernel_input_validate_f input, vx_kernel_output_validate_f output) { vx_context_t *context = (vx_context_t *)c; vx_kernel kernel = 0; vx_uint32 t = 0; vx_size index = 0; vx_target_t *target = NULL; vx_char targetName[VX_MAX_TARGET_NAME]; if (vxIsValidContext(context) == vx_false_e) { VX_PRINT(VX_ZONE_ERROR, "Invalid Context\n"); return (vx_kernel)NULL; } if (func_ptr == NULL || input == NULL || output == NULL || num_params > VX_INT_MAX_PARAMS || num_params == 0 || name == NULL || strncmp(name, "", VX_MAX_KERNEL_NAME) == 0) /* initialize and de-initialize can be NULL */ { VX_PRINT(VX_ZONE_ERROR, "Invalid Parameters!\n"); vxAddLogEntry(c, VX_ERROR_INVALID_PARAMETERS, "Invalid Parameters supplied to vxAddKernel\n"); return (vx_kernel)NULL; } /* find target to assign this to */ index = strnindex(name, ':', VX_MAX_TARGET_NAME); if (index == VX_MAX_TARGET_NAME) { strcpy(targetName,"khronos.c_model"); } else { strncpy(targetName, name, index); } VX_PRINT(VX_ZONE_KERNEL, "Deduced Name as %s\n", targetName); for (t = 0u; t < context->numTargets; t++) { target = &context->targets[t]; if (strncmp(targetName,target->name, VX_MAX_TARGET_NAME) == 0) { break; } target = NULL; } if (target && target->funcs.addtilingkernel) { kernel = target->funcs.addtilingkernel(target, name, enumeration, func_ptr, num_params, input, output); VX_PRINT(VX_ZONE_KERNEL,"Added Kernel %s to Target %s ("VX_FMT_REF")\n", name, target->name, kernel); } else { vxAddLogEntry(c, VX_ERROR_NO_RESOURCES, "No target named %s exists!\n", targetName); } return (vx_kernel)kernel; }
/* Queries read-only context attributes.  Each case validates the caller's
 * pointer/size (and required alignment) with VX_CHECK_PARAM before writing. */
VX_API_ENTRY vx_status VX_API_CALL vxQueryContext(vx_context context, vx_enum attribute, void *ptr, vx_size size)
{
    vx_status status = VX_SUCCESS;
    if (vxIsValidContext(context) == vx_false_e)
    {
        status = VX_ERROR_INVALID_REFERENCE;
    }
    else
    {
        switch (attribute)
        {
            case VX_CONTEXT_VENDOR_ID:
                if (VX_CHECK_PARAM(ptr, size, vx_uint16, 0x1))
                {
                    *(vx_uint16 *)ptr = VX_ID_KHRONOS;
                }
                else
                {
                    status = VX_ERROR_INVALID_PARAMETERS;
                }
                break;
            case VX_CONTEXT_VERSION:
                if (VX_CHECK_PARAM(ptr, size, vx_uint16, 0x1))
                {
                    *(vx_uint16 *)ptr = (vx_uint16)VX_VERSION;
                }
                else
                {
                    status = VX_ERROR_INVALID_PARAMETERS;
                }
                break;
            case VX_CONTEXT_MODULES:
                if (VX_CHECK_PARAM(ptr, size, vx_uint32, 0x3))
                {
                    *(vx_uint32 *)ptr = context->num_modules;
                }
                else
                {
                    status = VX_ERROR_INVALID_PARAMETERS;
                }
                break;
            case VX_CONTEXT_REFERENCES:
                if (VX_CHECK_PARAM(ptr, size, vx_uint32, 0x3))
                {
                    *(vx_uint32 *)ptr = context->num_references;
                }
                else
                {
                    status = VX_ERROR_INVALID_PARAMETERS;
                }
                break;
#if defined(EXPERIMENTAL_USE_TARGET)
            case VX_CONTEXT_TARGETS:
                if (VX_CHECK_PARAM(ptr, size, vx_uint32, 0x3))
                {
                    *(vx_uint32 *)ptr = context->num_targets;
                }
                else
                {
                    status = VX_ERROR_INVALID_PARAMETERS;
                }
                break;
#endif
            case VX_CONTEXT_IMPLEMENTATION:
                /* string attribute: caller supplies a buffer of at most the max length */
                if (size <= VX_MAX_IMPLEMENTATION_NAME && ptr)
                {
                    strncpy(ptr, implementation, VX_MAX_IMPLEMENTATION_NAME);
                }
                else
                {
                    status = VX_ERROR_INVALID_PARAMETERS;
                }
                break;
            case VX_CONTEXT_EXTENSIONS_SIZE:
                if (VX_CHECK_PARAM(ptr, size, vx_size, 0x3))
                {
                    *(vx_size *)ptr = sizeof(extensions);
                }
                else
                {
                    status = VX_ERROR_INVALID_PARAMETERS;
                }
                break;
            case VX_CONTEXT_EXTENSIONS:
                if (size <= sizeof(extensions) && ptr)
                {
                    strncpy(ptr, extensions, sizeof(extensions));
                }
                else
                {
                    status = VX_ERROR_INVALID_PARAMETERS;
                }
                break;
            case VX_CONTEXT_CONVOLUTION_MAX_DIMENSION:
                if (VX_CHECK_PARAM(ptr, size, vx_size, 0x3))
                {
                    *(vx_size *)ptr = VX_INT_MAX_CONVOLUTION_DIM;
                }
                else
                {
                    status = VX_ERROR_INVALID_PARAMETERS;
                }
                break;
            case VX_CONTEXT_NONLINEAR_MAX_DIMENSION:
                if (VX_CHECK_PARAM(ptr, size, vx_size, 0x3))
                {
                    *(vx_size *)ptr = VX_INT_MAX_NONLINEAR_DIM;
                }
                else
                {
                    status = VX_ERROR_INVALID_PARAMETERS;
                }
                break;
            case VX_CONTEXT_OPTICAL_FLOW_MAX_WINDOW_DIMENSION:
                if (VX_CHECK_PARAM(ptr, size, vx_size, 0x3))
                {
                    *(vx_size *)ptr = VX_OPTICALFLOWPYRLK_MAX_DIM;
                }
                else
                {
                    status = VX_ERROR_INVALID_PARAMETERS;
                }
                break;
            case VX_CONTEXT_IMMEDIATE_BORDER:
                if (VX_CHECK_PARAM(ptr, size, vx_border_t, 0x3))
                {
                    *(vx_border_t *)ptr = context->imm_border;
                }
                else
                {
                    status = VX_ERROR_INVALID_PARAMETERS;
                }
                break;
            case VX_CONTEXT_IMMEDIATE_BORDER_POLICY:
                if (VX_CHECK_PARAM(ptr, size, vx_enum, 0x3))
                {
                    *(vx_enum *)ptr = context->imm_border_policy;
                }
                else
                {
                    status = VX_ERROR_INVALID_PARAMETERS;
                }
                break;
            case VX_CONTEXT_UNIQUE_KERNELS:
                if (VX_CHECK_PARAM(ptr, size, vx_uint32, 0x3))
                {
                    *(vx_uint32 *)ptr = context->num_unique_kernels;
                }
                else
                {
                    status = VX_ERROR_INVALID_PARAMETERS;
                }
                break;
            case VX_CONTEXT_UNIQUE_KERNEL_TABLE:
                /* fills the caller's table with one entry per unique kernel
                 * enum found across all targets (first occurrence wins) */
                if ((size == (context->num_unique_kernels * sizeof(vx_kernel_info_t))) && (ptr != NULL))
                {
                    vx_uint32 k = 0u, t = 0u, k2 = 0u, numk = 0u;
                    vx_kernel_info_t *table = (vx_kernel_info_t *)ptr;
                    for (t = 0; t < context->num_targets; t++)
                    {
                        for (k = 0u; k < VX_INT_MAX_KERNELS; k++)
                        {
                            if (context->targets[t].kernels[k].enumeration != VX_KERNEL_INVALID)
                            {
                                vx_bool found = vx_false_e;
                                VX_PRINT(VX_ZONE_INFO, "Checking uniqueness of %s (%d)\n", context->targets[t].kernels[k].name, context->targets[t].kernels[k].enumeration);
                                /* linear scan over entries already emitted */
                                for (k2 = 0u; k2 < numk; k2++)
                                {
                                    if (table[k2].enumeration == context->targets[t].kernels[k].enumeration)
                                    {
                                        found = vx_true_e;
                                        break;
                                    }
                                }
                                if (found == vx_false_e)
                                {
                                    VX_PRINT(VX_ZONE_INFO, "Kernel %s is unique\n", context->targets[t].kernels[k].name);
                                    table[numk].enumeration = context->targets[t].kernels[k].enumeration;
#if defined(EXPERIMENTAL_USE_TARGET) || defined(EXPERIMENTAL_USE_VARIANT)
                                    // get the central string out
                                    {
                                        vx_uint32 c = 0;
                                        strncpy(table[numk].name, context->targets[t].kernels[k].name, VX_MAX_KERNEL_NAME);
                                        /* NOTE(review): truncates at ';' while
                                         * names elsewhere use ':' as the
                                         * separator -- confirm intended */
                                        for (c = 0; table[numk].name[c] != '\0'; c++)
                                        {
                                            if (table[numk].name[c] == ';')
                                            {
                                                table[numk].name[c] = '\0';
                                                break;
                                            }
                                        }
                                    }
#else
                                    strncpy(table[numk].name, context->targets[t].kernels[k].name, VX_MAX_KERNEL_NAME);
#endif
                                    numk++;
                                }
                            }
                        }
                    }
                }
                else
                {
                    status = VX_ERROR_INVALID_PARAMETERS;
                }
                break;
            default:
                status = VX_ERROR_NOT_SUPPORTED;
                break;
        }
    }
    return status;
}