/* Allocates a zeroed reference object and initializes it for the given
 * context and object type. Returns NULL when the allocation fails. */
vx_reference_t *vxCreateReference(vx_context_t *context, vx_enum type)
{
    vx_reference_t *ref = VX_CALLOC(vx_reference_t);
    if (ref == NULL)
        return NULL;
    vxInitReference(ref, context, type);
    return ref;
}
/* Creates a threshold object of the requested type within the given context.
 * Returns NULL when the type is unsupported, the context is invalid, or
 * allocation fails. The new object carries one external reference. */
vx_threshold vxCreateThreshold(vx_context c, vx_enum type)
{
    vx_context_t *context = (vx_context_t *)c;
    vx_threshold_t *thresh = NULL;

    /* Validate inputs up front: threshold type first, then the context. */
    if (vxIsValidThresholdType(type) != vx_true_e)
        return (vx_threshold)NULL;
    if (vxIsValidContext(context) != vx_true_e)
        return (vx_threshold)NULL;

    thresh = VX_CALLOC(vx_threshold_t);
    if (thresh != NULL)
    {
        vxInitReference(&thresh->base, context, VX_TYPE_THRESHOLD);
        vxIncrementReference(&thresh->base);
        /* Register the object with the context so it can be tracked/released. */
        vxAddReference(thresh->base.context, (vx_reference_t *)thresh);
        thresh->type = type;
    }
    return (vx_threshold)thresh;
}
/* Allocates and initializes an internal kernel descriptor.
 *
 * context    - owning context (not validated here; callers pass a live context).
 * kenum      - kernel enumeration value.
 * function   - kernel entry point (may be NULL for target-dispatched kernels).
 * name       - kernel name, copied (truncated if necessary) into the descriptor.
 * parameters - optional array of numParams parameter descriptions.
 * numParams  - number of parameters; must be < VX_INT_MAX_PARAMS.
 *
 * Returns the new kernel (holding one reference) or NULL on failure.
 */
vx_kernel_t *vxAllocateKernel(vx_context_t *context,
                              vx_enum kenum,
                              vx_kernel_f function,
                              vx_char name[VX_MAX_KERNEL_NAME],
                              vx_param_description_t *parameters,
                              vx_uint32 numParams)
{
    vx_kernel_t *kernel = NULL;

    /* Validate the signature size before allocating anything, instead of
     * allocating, initializing, and then freeing on failure. */
    if (numParams >= VX_INT_MAX_PARAMS)
        return NULL;

    kernel = VX_CALLOC(vx_kernel_t);
    if (kernel)
    {
        vxInitReference((vx_reference_t *)kernel, context, VX_TYPE_KERNEL);
        vxIncrementReference(&kernel->base);
        /* setup the kernel meta-data */
        /* strncpy does not NUL-terminate when the source fills the whole
         * buffer; copy at most VX_MAX_KERNEL_NAME-1 chars and terminate
         * explicitly so kernel->name is always a valid C string. */
        strncpy(kernel->name, name, VX_MAX_KERNEL_NAME - 1);
        kernel->name[VX_MAX_KERNEL_NAME - 1] = '\0';
        kernel->enumeration = kenum;
        kernel->function = function;
        kernel->signature.numParams = numParams;
        kernel->attributes.borders.mode = VX_BORDER_MODE_UNDEFINED;
        if (parameters != NULL)
        {
            vx_uint32 p;
            for (p = 0; p < numParams; p++)
            {
                kernel->signature.directions[p] = parameters[p].direction;
                kernel->signature.types[p] = parameters[p].type;
            }
        }
    }
    return kernel;
}
/* Creates a 2-D convolution object with signed 16-bit coefficients.
 * Both dimensions must be odd and at least 3; otherwise NULL is returned.
 * The new object carries one external reference and defaults to scale 1. */
vx_convolution vxCreateConvolution(vx_context context, vx_size columns, vx_size rows)
{
    vx_context_t *ctx = (vx_context_t *)context;
    vx_convolution_t *conv = NULL;

    if (vxIsValidContext(ctx) == vx_true_e &&
        isodd(columns) && (columns >= 3) &&
        isodd(rows) && (rows >= 3))
    {
        conv = VX_CALLOC(vx_convolution_t);
        if (conv != NULL)
        {
            vxInitReference(&conv->base.base, ctx, VX_TYPE_CONVOLUTION);
            vxIncrementReference(&conv->base.base);
            vxAddReference(conv->base.base.context, &conv->base.base);
            /* Coefficients are VX_TYPE_INT16; describe a single contiguous
             * 2-D backing allocation of columns*rows elements. */
            conv->base.type = VX_TYPE_INT16;
            conv->base.columns = columns;
            conv->base.rows = rows;
            conv->base.memory.ndims = 2;
            conv->base.memory.nptrs = 1;
            conv->base.memory.dims[0][0] = sizeof(vx_int16);
            conv->base.memory.dims[0][1] = (vx_int32)(columns * rows);
            conv->scale = 1;
        }
    }
    return (vx_convolution)conv;
}
/* Creates (or re-references) the process-wide singleton OpenVX context.
 * First call: allocates the context, loads and initializes all targets,
 * builds the target priority list, and spins up the async graph worker
 * thread. Subsequent calls return the same context with its external
 * reference count incremented. Returns NULL/0 on allocation failure or
 * when no targets could be loaded. */
/* NOTE(review): the matching #if for this #endif is outside this view —
 * presumably an alternative declaration of vxCreateContext; confirm. */
VX_API_ENTRY vx_context VX_API_CALL vxCreateContext()
#endif
{
    vx_context context = NULL;
    /* Lazily create the two global semaphores before the first lock.
     * NOTE(review): this unsynchronized check races if two threads call
     * vxCreateContext concurrently before context_lock exists — confirm
     * the intended single-threaded-first-call contract. */
    if (single_context == NULL)
    {
        vxCreateSem(&context_lock, 1);
        vxCreateSem(&global_lock, 1);
    }
    vxSemWait(&context_lock);
    /* Re-check under the lock: only the first caller builds the context. */
    if (single_context == NULL)
    {
        /* read the variables for debugging flags */
        vx_set_debug_zone_from_env();
        context = VX_CALLOC(vx_context_t); /* \todo get from allocator? */
        if (context)
        {
            vx_uint32 p = 0u, p2 = 0u, t = 0u;
            context->p_global_lock = &global_lock;
            /* Immediate-mode defaults: undefined border handling. */
            context->imm_border.mode = VX_BORDER_UNDEFINED;
            context->imm_border_policy = VX_BORDER_POLICY_DEFAULT_TO_UNDEFINED;
            context->next_dynamic_user_kernel_id = 0;
            context->next_dynamic_user_library_id = 1;
            /* NOTE(review): 4-argument vxInitReference here vs. the 3-argument
             * call used elsewhere in this file — verify which prototype this
             * tree actually declares. */
            vxInitReference(&context->base, NULL, VX_TYPE_CONTEXT, NULL);
#if !DISABLE_ICD_COMPATIBILITY
            context->base.platform = platform;
#endif
            vxIncrementReference(&context->base, VX_EXTERNAL);
            /* Worker pool used to execute graph nodes. */
            context->workers = vxCreateThreadpool(VX_INT_HOST_CORES,
                                                  VX_INT_MAX_REF, /* very deep queues! */
                                                  sizeof(vx_work_t),
                                                  vxWorkerNode,
                                                  context);
            vxCreateConstErrors(context);
#ifdef EXPERIMENTAL_USE_HEXAGON
            remote_handle_open((const char *)OPENVX_HEXAGON_NAME, &tmp_ph);
#endif
            /* load all targets */
            for (t = 0u; t < dimof(targetModules); t++)
            {
                if (vxLoadTarget(context, targetModules[t]) == VX_SUCCESS)
                {
                    context->num_targets++;
                }
            }
            if (context->num_targets == 0)
            {
                VX_PRINT(VX_ZONE_ERROR, "No targets loaded!\n");
                /* NOTE(review): frees the context without tearing down the
                 * threadpool created above — presumably leaks workers; verify. */
                free(context);
                vxSemPost(&context_lock);
                return 0;
            }
            /* initialize all targets */
            for (t = 0u; t < context->num_targets; t++)
            {
                if (context->targets[t].module.handle)
                {
                    /* call the init function */
                    if (context->targets[t].funcs.init(&context->targets[t]) != VX_SUCCESS)
                    {
                        VX_PRINT(VX_ZONE_WARNING, "Target %s failed to initialize!\n", context->targets[t].name);
                        /* unload this module */
                        vxUnloadTarget(context, t, vx_true_e);
                        /* NOTE(review): break leaves any later targets
                         * uninitialized (enabled stays false) — confirm this
                         * early-exit is intentional rather than `continue`. */
                        break;
                    }
                    else
                    {
                        context->targets[t].enabled = vx_true_e;
                    }
                }
            }
            /* assign the targets by priority into the list */
            p2 = 0u;
            for (p = 0u; p < VX_TARGET_PRIORITY_MAX; p++)
            {
                for (t = 0u; t < context->num_targets; t++)
                {
                    vx_target_t * target = &context->targets[t];
                    if (p == target->priority)
                    {
                        context->priority_targets[p2] = t;
                        p2++;
                    }
                }
            }
            /* print out the priority list */
            for (t = 0u; t < context->num_targets; t++)
            {
                vx_target_t *target = &context->targets[context->priority_targets[t]];
                if (target->enabled == vx_true_e)
                {
                    VX_PRINT(VX_ZONE_TARGET, "target[%u]: %s\n", target->priority, target->name);
                }
            }
            // create the internal thread which processes graphs for asynchronous mode.
            vxInitQueue(&context->proc.input);
            vxInitQueue(&context->proc.output);
            context->proc.running = vx_true_e;
            context->proc.thread = vxCreateThread(vxWorkerGraph, &context->proc);
            /* Publish the singleton only after it is fully constructed. */
            single_context = context;
            context->imm_target_enum = VX_TARGET_ANY;
            memset(context->imm_target_string, 0, sizeof(context->imm_target_string));
            /* memory maps table lock */
            vxCreateSem(&context->memory_maps_lock, 1);
        }
    }
    else
    {
        /* Singleton already exists: hand it out with one more external ref. */
        context = single_context;
        vxIncrementReference(&context->base, VX_EXTERNAL);
    }
    vxSemPost(&context_lock);
    return (vx_context)context;
}