VX_API_ENTRY vx_status VX_API_CALL vxCommitDistribution(vx_distribution distribution, const void *ptr)
{
    vx_status status = VX_FAILURE;
    if ((vxIsValidSpecificReference(&distribution->base, VX_TYPE_DISTRIBUTION) == vx_true_e) &&
        (vxAllocateMemory(distribution->base.context, &distribution->memory) == vx_true_e))
    {
        if (ptr != NULL)
        {
            vxSemWait(&distribution->base.lock);
            {
                if (ptr != distribution->memory.ptrs[0])
                {
                    vx_size size = vxComputeMemorySize(&distribution->memory, 0);
                    memcpy(distribution->memory.ptrs[0], ptr, size);
                    VX_PRINT(VX_ZONE_INFO, "Copied distribution from %p to %p for "VX_FMT_SIZE" bytes\n",
                             ptr, distribution->memory.ptrs[0], size);
                }
            }
            vxSemPost(&distribution->base.lock);
            vxWroteToReference(&distribution->base);
        }
        vxDecrementReference(&distribution->base, VX_EXTERNAL);
        status = VX_SUCCESS;
    }
    else
    {
        VX_PRINT(VX_ZONE_ERROR, "Not a valid object!\n");
    }
    return status;
}
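/*
 * A minimal application-side sketch of the access/commit pattern the function
 * above serves. It assumes a valid vx_context created elsewhere; the function
 * name hist_example and the bin/offset/range values are illustrative only.
 */
static void hist_example(vx_context context)
{
    vx_distribution dist = vxCreateDistribution(context, 16, 0, 256);
    void *ptr = NULL;
    /* vxAccessDistribution hands back (or maps) the histogram bins */
    if (vxAccessDistribution(dist, &ptr, VX_READ_AND_WRITE) == VX_SUCCESS)
    {
        /* ... the application reads or edits the bins here ... */
        /* the commit writes the data back and drops the access claim */
        vxCommitDistribution(dist, ptr);
    }
    vxReleaseDistribution(&dist);
}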
vx_status vxTargetDeinit(vx_target_t *target)
{
    vx_context context = target->base.context;
    if (vxGetStatus((vx_reference)context) == VX_SUCCESS)
    {
        cl_uint p = 0, d = 0;
        vx_uint32 k = 0;
        for (p = 0; p < context->num_platforms; p++)
        {
            for (k = 0; k < num_cl_kernels; k++)
            {
                vxDecrementReference(&target->kernels[k].base, VX_INTERNAL);
                clReleaseKernel(cl_kernels[k]->kernels[p]);
                clReleaseProgram(cl_kernels[k]->program[p]);
            }
            for (d = 0; d < context->num_devices[p]; d++)
            {
                clReleaseCommandQueue(context->queues[p][d]);
            }
            clReleaseContext(context->global[p]);
        }
    }
    return VX_SUCCESS;
}
void vxReleaseReference(vx_reference_t *ref, vx_enum type, vx_bool internal, vx_destructor_f destructor)
{
    if (vxIsValidSpecificReference(ref, type) == vx_true_e)
    {
        vx_bool result = vx_false_e;
        if (internal == vx_true_e)
            result = vxDecrementIntReference(ref);
        else
            result = vxDecrementReference(ref);
        if ((result == vx_true_e) && (vxTotalReferenceCount(ref) == 0))
        {
            /* if the caller supplied a destructor, call it. */
            if (destructor)
            {
                destructor(ref);
            }
            vxRemoveReference(ref->context, ref);
            ref->magic = 0; /* make sure no existing copies of refs can use ref again */
            free(ref);
        }
    }
}
vx_status vxRemoveKernel(vx_kernel kernel)
{
    vx_status status = VX_ERROR_INVALID_PARAMETERS;
    vx_kernel_t *kern = (vx_kernel_t *)kernel;
    if (vxIsValidSpecificReference(&kern->base, VX_TYPE_KERNEL) == vx_true_e)
    {
        vxDecrementReference(&kern->base);
        kern->enabled = vx_false_e;
        kern->enumeration = VX_KERNEL_INVALID;
        kern->base.context->numKernels--;
        status = VX_SUCCESS;
    }
    return status;
}
void vxReleaseKernel(vx_kernel *kernel)
{
    vx_kernel_t *kern = (vx_kernel_t *)(kernel ? *kernel : 0);
    if (vxIsValidSpecificReference(&kern->base, VX_TYPE_KERNEL) == vx_true_e)
    {
        VX_PRINT(VX_ZONE_KERNEL, "Releasing kernel "VX_FMT_REF"\n", (void *)kern);
        vxDecrementReference(&kern->base);
    }
    else
    {
        VX_PRINT(VX_ZONE_ERROR, "Invalid Reference!\n");
    }
    if (kernel)
        *kernel = 0;
}
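/*
 * Application-side sketch: kernels obtained by name or enum hold an external
 * reference that must be released through the function above. The Sobel kernel
 * name string and the helper name are assumptions for illustration; error
 * handling is omitted.
 */
static void kernel_lookup_example(vx_context context)
{
    vx_kernel kernel = vxGetKernelByName(context, "org.khronos.openvx.sobel_3x3");
    if (vxGetStatus((vx_reference)kernel) == VX_SUCCESS)
    {
        /* ... e.g. pass the kernel to vxCreateGenericNode(graph, kernel) ... */
        vxReleaseKernel(&kernel); /* drops the external count taken by the lookup */
    }
}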
vx_status vxCommitConvolutionCoefficients(vx_convolution conv, vx_int16 *array)
{
    vx_convolution_t *convolution = (vx_convolution_t *)conv;
    vx_status status = VX_ERROR_INVALID_REFERENCE;
    if ((vxIsValidSpecificReference(&convolution->base.base, VX_TYPE_CONVOLUTION) == vx_true_e) &&
        (vxAllocateMemory(convolution->base.base.context, &convolution->base.memory) == vx_true_e))
    {
        vxSemWait(&convolution->base.base.lock);
        if (array)
        {
            vx_size size = convolution->base.memory.strides[0][1] *
                           convolution->base.memory.dims[0][1];
            memcpy(convolution->base.memory.ptrs[0], array, size);
        }
        vxSemPost(&convolution->base.base.lock);
        vxWroteToReference(&convolution->base.base);
        vxDecrementReference(&convolution->base.base);
        status = VX_SUCCESS;
    }
    return status;
}
VX_API_ENTRY vx_status VX_API_CALL vxReleaseContext(vx_context *c)
{
    vx_status status = VX_SUCCESS;
    vx_context context = (c ? *c : 0);
    vx_uint32 r, m, a;
    vx_uint32 t;
    if (c) *c = 0;
    vxSemWait(&context_lock);
    if (vxIsValidContext(context) == vx_true_e)
    {
        if (vxDecrementReference(&context->base, VX_EXTERNAL) == 0)
        {
            vxDestroyThreadpool(&context->workers);
            context->proc.running = vx_false_e;
            vxPopQueue(&context->proc.input);
            vxJoinThread(context->proc.thread, NULL);
            vxDeinitQueue(&context->proc.output);
            vxDeinitQueue(&context->proc.input);

            /* Deregister any log callbacks if any are registered */
            vxRegisterLogCallback(context, NULL, vx_false_e);

            /*! \internal Garbage Collect All References
             * Details:
             *   1. This loop will warn of references which have not been released by the user.
             *   2. It will close all internally opened error references.
             *   3. It will close the external references, which in turn will internally
             *      close any internally dependent references that they reference, assuming the
             *      reference counting has been done properly in the framework.
             *   4. This garbage collection must be done before the targets are released since some of
             *      these external references may have internal references to target kernels.
             */
            for (r = 0; r < VX_INT_MAX_REF; r++)
            {
                vx_reference_t *ref = context->reftable[r];

                /* Warnings should only come when users have not released all external references */
                if (ref && ref->external_count > 0)
                {
                    VX_PRINT(VX_ZONE_WARNING, "Stale reference "VX_FMT_REF" of type %08x at external count %u, internal count %u\n",
                             ref, ref->type, ref->external_count, ref->internal_count);
                }

                /* These were internally opened during creation, so internally close ERRORs here */
                if (ref && ref->type == VX_TYPE_ERROR)
                {
                    vxReleaseReferenceInt(&ref, ref->type, VX_INTERNAL, NULL);
                }

                /* The warning above lets the user fix unreleased external objects, but close them here anyway */
                while (ref && ref->external_count > 1)
                {
                    vxDecrementReference(ref, VX_EXTERNAL);
                }
                if (ref && ref->external_count > 0)
                {
                    vxReleaseReferenceInt(&ref, ref->type, VX_EXTERNAL, NULL);
                }
            }

            for (m = 0; m < context->num_modules; m++)
            {
                if (context->modules[m].handle)
                {
                    vxUnloadModule(context->modules[m].handle);
                    memset(context->modules[m].name, 0, sizeof(context->modules[m].name));
                    context->modules[m].handle = VX_MODULE_INIT;
                }
            }

            /* de-initialize and unload each target */
            for (t = 0u; t < context->num_targets; t++)
            {
                if (context->targets[t].enabled == vx_true_e)
                {
                    context->targets[t].funcs.deinit(&context->targets[t]);
                    vxUnloadTarget(context, t, vx_true_e);
                    context->targets[t].enabled = vx_false_e;
                }
            }

            /* Remove all outstanding accessors. */
            for (a = 0; a < dimof(context->accessors); ++a)
                if (context->accessors[a].used)
                    vxRemoveAccessor(context, a);

            /* Check for outstanding mappings */
            for (a = 0; a < dimof(context->memory_maps); ++a)
            {
                if (context->memory_maps[a].used)
                {
                    VX_PRINT(VX_ZONE_ERROR, "Memory map %d not unmapped\n", a);
                    vxMemoryUnmap(context, a);
                }
            }
            vxDestroySem(&context->memory_maps_lock);

            /* By now, all external and internal references should be removed */
            for (r = 0; r < VX_INT_MAX_REF; r++)
            {
                if (context->reftable[r])
                    VX_PRINT(VX_ZONE_ERROR, "Reference %d not removed\n", r);
            }

#ifdef EXPERIMENTAL_USE_HEXAGON
            remote_handle_close(tmp_ph);
#endif

            /*! \internal wipe away the context memory first */
            /* Normally destroying the semaphore is part of releasing the reference, but that can't be done for the context itself */
            vxDestroySem(&((vx_reference)context)->lock);
            memset(context, 0, sizeof(vx_context_t));
            free((void *)context);
            vxDestroySem(&global_lock);
            vxSemPost(&context_lock);
            vxDestroySem(&context_lock);
            single_context = NULL;
            return status;
        }
        else
        {
            VX_PRINT(VX_ZONE_WARNING, "Context still has %u holders\n", vxTotalReferenceCount(&context->base));
        }
    }
    else
    {
        status = VX_ERROR_INVALID_REFERENCE;
    }
    vxSemPost(&context_lock);
    return status;
}
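/*
 * Illustrative only: the teardown path above runs when the last external
 * reference on the context is dropped. A typical application lifetime is
 * simply create, use, release.
 */
static void context_lifetime_example(void)
{
    vx_context context = vxCreateContext();
    if (vxGetStatus((vx_reference)context) == VX_SUCCESS)
    {
        /* ... create graphs, images, arrays, etc. and release them ... */
        vxReleaseContext(&context); /* count reaches zero; the garbage collection above runs */
    }
}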
vx_status vxInitPyramid(vx_pyramid pyramid, vx_size levels, vx_float32 scale, vx_uint32 width, vx_uint32 height, vx_df_image format)
{
    const vx_float32 c_orbscale[4] = {
        0.5f,
        VX_SCALE_PYRAMID_ORB,
        VX_SCALE_PYRAMID_ORB * VX_SCALE_PYRAMID_ORB,
        VX_SCALE_PYRAMID_ORB * VX_SCALE_PYRAMID_ORB * VX_SCALE_PYRAMID_ORB,
    };
    vx_status status = VX_SUCCESS;

    /* very first init will come in here */
    if (pyramid->levels == NULL)
    {
        pyramid->numLevels = levels;
        pyramid->scale = scale;
        pyramid->levels = (vx_image *)calloc(levels, sizeof(vx_image_t *));
    }

    /* these could be "virtual" values or hard values */
    pyramid->width = width;
    pyramid->height = height;
    pyramid->format = format;

    if (pyramid->levels)
    {
        if (pyramid->width != 0 && pyramid->height != 0 && format != VX_DF_IMAGE_VIRT)
        {
            vx_int32 i;
            vx_uint32 w = pyramid->width;
            vx_uint32 h = pyramid->height;
            vx_uint32 ref_w = pyramid->width;
            vx_uint32 ref_h = pyramid->height;

            for (i = 0; i < pyramid->numLevels; i++)
            {
                vx_context c = (vx_context)pyramid->base.context;
                if (pyramid->levels[i] == 0)
                {
                    pyramid->levels[i] = vxCreateImage(c, w, h, format);

                    /* increment the internal counter on the image, not the external one */
                    vxIncrementReference((vx_reference_t *)pyramid->levels[i], VX_INTERNAL);
                    vxDecrementReference((vx_reference_t *)pyramid->levels[i], VX_EXTERNAL);

                    /* remember that the scope of the image is the pyramid */
                    ((vx_image_t *)pyramid->levels[i])->base.scope = (vx_reference_t *)pyramid;

                    if (VX_SCALE_PYRAMID_ORB == scale)
                    {
                        vx_float32 orb_scale = c_orbscale[(i + 1) % 4];
                        w = (vx_uint32)ceilf((vx_float32)ref_w * orb_scale);
                        h = (vx_uint32)ceilf((vx_float32)ref_h * orb_scale);
                        if (0 == ((i + 1) % 4))
                        {
                            ref_w = w;
                            ref_h = h;
                        }
                    }
                    else
                    {
                        w = (vx_uint32)ceilf((vx_float32)w * scale);
                        h = (vx_uint32)ceilf((vx_float32)h * scale);
                    }
                }
            }
        }
        else
        {
            /* virtual images, but in a pyramid we really need to know the
             * level 0 value. Dimensionless images don't work after validation
             * time. */
        }
    }
    else
    {
        status = VX_ERROR_NO_MEMORY;
    }
    return status;
}
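/*
 * A small application-side sketch of how the initializer above is exercised via
 * vxCreatePyramid(): each level becomes an internally referenced image scoped
 * to the pyramid. The function name and dimensions are illustrative; error
 * handling is omitted for brevity.
 */
static void pyramid_example(vx_context context)
{
    /* 4 levels, each half the size of the previous one */
    vx_pyramid pyr = vxCreatePyramid(context, 4, VX_SCALE_PYRAMID_HALF, 640, 480, VX_DF_IMAGE_U8);
    if (vxGetStatus((vx_reference)pyr) == VX_SUCCESS)
    {
        vx_image level0 = vxGetPyramidLevel(pyr, 0); /* returns a new external reference */
        /* ... use level0, e.g. as a graph parameter ... */
        vxReleaseImage(&level0);
        vxReleasePyramid(&pyr);
    }
}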
vx_status vxCommitArrayRangeInt(vx_array arr, vx_size start, vx_size end, const void *ptr)
{
    vx_status status = VX_ERROR_INVALID_REFERENCE;
    vx_bool external = vx_true_e; /* assume that it was an allocated buffer */

    if ((ptr == NULL) || (start > end) || (end > arr->num_items))
    {
        return VX_ERROR_INVALID_PARAMETERS;
    }

    /* determine if virtual before checking for memory */
    if (arr->base.is_virtual == vx_true_e)
    {
        if (arr->base.is_accessible == vx_false_e)
        {
            /* User tried to access a "virtual" array. */
            VX_PRINT(VX_ZONE_ERROR, "Cannot access a virtual array\n");
            return VX_ERROR_OPTIMIZED_AWAY;
        }
        /* framework trying to access a virtual array, this is ok. */
    }

    /* CASES:
     * 1. ZERO_AREA
     * 2. CONSTANT - independent
     * 3. INTERNAL - independent of area
     * 4. EXTERNAL - dependent on area (do nothing on zero, determine on non-zero)
     * 5. !INTERNAL && !EXTERNAL == MAPPED
     */
    {
        /* check to see if the range is zero area */
        vx_bool zero_area = (end == 0) ? vx_true_e : vx_false_e;
        vx_uint32 index = UINT32_MAX; /* out of bounds; if given to remove, won't do anything */
        vx_bool internal = vxFindAccessor(arr->base.context, ptr, &index);

        if (zero_area == vx_false_e)
        {
            /* this could be a write-back */
            if (internal == vx_true_e && arr->base.context->accessors[index].usage == VX_READ_ONLY)
            {
                /* this is a buffer that we allocated on behalf of the user and now they are done. Do nothing else. */
                vxRemoveAccessor(arr->base.context, index);
            }
            else
            {
                vx_uint8 *beg_ptr = arr->memory.ptrs[0];
                vx_uint8 *end_ptr = &beg_ptr[arr->item_size * arr->num_items];

                if ((beg_ptr <= (vx_uint8 *)ptr) && ((vx_uint8 *)ptr < end_ptr))
                {
                    /* the pointer is contained in the array, so it was mapped, thus
                     * there's nothing else to do. */
                    external = vx_false_e;
                }

                if (external == vx_true_e || internal == vx_true_e)
                {
                    /* the pointer was not mapped, copy */
                    vx_size offset = start * arr->item_size;
                    vx_size len = (end - start) * arr->item_size;
                    if (internal == vx_true_e)
                    {
                        vx_size stride = *(vx_size *)arr->base.context->accessors[index].extra_data;
                        if (stride == arr->item_size)
                        {
                            memcpy(&beg_ptr[offset], ptr, len);
                        }
                        else
                        {
                            vx_size i;
                            const vx_uint8 *pSrc;
                            vx_uint8 *pDest;
                            for (i = start, pSrc = ptr, pDest = &beg_ptr[offset];
                                 i < end;
                                 i++, pSrc += stride, pDest += arr->item_size)
                            {
                                memcpy(pDest, pSrc, arr->item_size);
                            }
                        }
                        /* a write-only or read/write copy */
                        vxRemoveAccessor(arr->base.context, index);
                    }
                    else
                    {
                        memcpy(&beg_ptr[offset], ptr, len);
                    }
                }
                vxWroteToReference(&arr->base);
            }
            vxSemPost(&arr->memory.locks[0]);
            status = VX_SUCCESS;
        }
        else
        {
            /* could be RO|WO|RW where they decided not to commit anything. */
            if (internal == vx_true_e) /* RO */
            {
                vxRemoveAccessor(arr->base.context, index);
            }
            else /* RW|WO */
            {
                vxSemPost(&arr->memory.locks[0]);
            }
            status = VX_SUCCESS;
        }
        vxDecrementReference(&arr->base, VX_EXTERNAL);
    }
    return status;
}
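/*
 * A hedged application-side sketch of the public access/commit pair that is
 * assumed to end up in the internal helper above (vxCommitArrayRange is
 * expected to forward to vxCommitArrayRangeInt in this implementation). The
 * helper name and the range bounds are illustrative.
 */
static void array_commit_example(vx_array arr, vx_size num_items)
{
    vx_size stride = 0;
    void *ptr = NULL;
    /* maps (or copies) the requested range [0, num_items) */
    if (vxAccessArrayRange(arr, 0, num_items, &stride, &ptr, VX_READ_AND_WRITE) == VX_SUCCESS)
    {
        /* ... modify items via ptr and stride here ... */
        /* write-back: the helper above decides between mapped, internal, and external pointers */
        vxCommitArrayRange(arr, 0, num_items, ptr);
    }
}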