/*!
 * \brief Releases an external reference to the context; destroys the context
 *        when the last external reference is dropped.
 *
 * \param [in,out] c Pointer to the context reference; cleared (*c = 0) on entry
 *                   so the caller's handle is invalidated even if release fails.
 * \return VX_SUCCESS on release, VX_ERROR_INVALID_REFERENCE if *c was not a
 *         valid context.
 *
 * NOTE(review): the whole body runs under context_lock; on the destruction
 * path the lock is posted *after* free(context) and then the semaphore itself
 * is destroyed — order looks deliberate (no other holder can exist at count 0)
 * but confirm against the framework's locking rules.
 */
VX_API_ENTRY vx_status VX_API_CALL vxReleaseContext(vx_context *c)
{
    vx_status status = VX_SUCCESS;
    vx_context context = (c?*c:0);          /* tolerate c == NULL */
    vx_uint32 r,m,a;
    vx_uint32 t;
    if (c) *c = 0;                          /* invalidate caller's handle up-front */
    vxSemWait(&context_lock);
    if (vxIsValidContext(context) == vx_true_e)
    {
        /* Only tear down when the last external reference is released. */
        if (vxDecrementReference(&context->base, VX_EXTERNAL) == 0)
        {
            /* Stop the worker pool and the graph-processing thread:
             * pop the input queue to unblock the thread, then join it. */
            vxDestroyThreadpool(&context->workers);
            context->proc.running = vx_false_e;
            vxPopQueue(&context->proc.input);
            vxJoinThread(context->proc.thread, NULL);
            vxDeinitQueue(&context->proc.output);
            vxDeinitQueue(&context->proc.input);

            /* Deregister any log callbacks if there is any registered */
            vxRegisterLogCallback(context, NULL, vx_false_e);

            /*! \internal Garbage Collect All References */
            /* Details:
             * 1. This loop will warn of references which have not been released by the user.
             * 2. It will close all internally opened error references.
             * 3. It will close the external references, which in turn will internally
             *    close any internally dependent references that they reference, assuming the
             *    reference counting has been done properly in the framework.
             * 4. This garbage collection must be done before the targets are released since some of
             *    these external references may have internal references to target kernels.
             */
            for (r = 0; r < VX_INT_MAX_REF; r++)
            {
                vx_reference_t *ref = context->reftable[r];
                /* Warnings should only come when users have not released all external references */
                if (ref && ref->external_count > 0)
                {
                    VX_PRINT(VX_ZONE_WARNING,"Stale reference "VX_FMT_REF" of type %08x at external count %u, internal count %u\n",
                             ref, ref->type, ref->external_count, ref->internal_count);
                }
                /* These were internally opened during creation, so should internally close ERRORs */
                /* NOTE(review): vxReleaseReferenceInt takes &ref — it presumably NULLs ref on
                 * release, which makes the checks below skip already-freed entries; confirm. */
                if(ref && ref->type == VX_TYPE_ERROR)
                {
                    vxReleaseReferenceInt(&ref, ref->type, VX_INTERNAL, NULL);
                }
                /* Warning above so user can fix release external objects, but close here anyway */
                /* Drain all but the last external count, then release the final one. */
                while (ref && ref->external_count > 1)
                {
                    vxDecrementReference(ref, VX_EXTERNAL);
                }
                if (ref && ref->external_count > 0)
                {
                    vxReleaseReferenceInt(&ref, ref->type, VX_EXTERNAL, NULL);
                }
            }

            /* Unload every loaded kernel module and reset its slot. */
            for (m = 0; m < context->num_modules; m++)
            {
                if (context->modules[m].handle)
                {
                    vxUnloadModule(context->modules[m].handle);
                    memset(context->modules[m].name, 0, sizeof(context->modules[m].name));
                    context->modules[m].handle = VX_MODULE_INIT;
                }
            }

            /* de-initialize and unload each target */
            for (t = 0u; t < context->num_targets; t++)
            {
                if (context->targets[t].enabled == vx_true_e)
                {
                    context->targets[t].funcs.deinit(&context->targets[t]);
                    vxUnloadTarget(context, t, vx_true_e);
                    context->targets[t].enabled = vx_false_e;
                }
            }

            /* Remove all outstanding accessors. */
            for (a = 0; a < dimof(context->accessors); ++a)
                if (context->accessors[a].used)
                    vxRemoveAccessor(context, a);

            /* Check for outstanding mappings */
            for (a = 0; a < dimof(context->memory_maps); ++a)
            {
                if (context->memory_maps[a].used)
                {
                    VX_PRINT(VX_ZONE_ERROR, "Memory map %d not unmapped\n", a);
                    vxMemoryUnmap(context, a);
                }
            }
            vxDestroySem(&context->memory_maps_lock);

            /* By now, all external and internal references should be removed */
            for (r = 0; r < VX_INT_MAX_REF; r++)
            {
                if(context->reftable[r])
                    VX_PRINT(VX_ZONE_ERROR,"Reference %d not removed\n", r);
            }

#ifdef EXPERIMENTAL_USE_HEXAGON
            remote_handle_close(tmp_ph);
#endif

            /*! \internal wipe away the context memory first */
            /* Normally destroy sem is part of release reference, but can't for context */
            vxDestroySem(&((vx_reference )context)->lock);
            memset(context, 0, sizeof(vx_context_t));
            free((void *)context);
            vxDestroySem(&global_lock);
            vxSemPost(&context_lock);
            vxDestroySem(&context_lock);
            single_context = NULL;   /* allow a fresh context to be created later */
            return status;
        }
        else
        {
            /* Still externally/internally held elsewhere — nothing destroyed. */
            VX_PRINT(VX_ZONE_WARNING, "Context still has %u holders\n", vxTotalReferenceCount(&context->base));
        }
    }
    else
    {
        status = VX_ERROR_INVALID_REFERENCE;
    }
    vxSemPost(&context_lock);
    return status;
}
/*!
 * \brief Loads a kernel extension module into the context and calls its
 *        "vxPublishKernels" entry point to register its kernels.
 *
 * \param [in] c    The context to load the module into.
 * \param [in] name Base name of the module (may be NULL; "openvx-ext" is
 *                  used as the default).
 * \return VX_SUCCESS on success; VX_ERROR_INVALID_REFERENCE for an invalid
 *         context; VX_ERROR_INVALID_MODULE if the publish symbol is missing;
 *         VX_FAILURE if no module could be found or published.
 */
vx_status vxLoadKernels(vx_context c, vx_char *name)
{
    vx_context_t *context = (vx_context_t *)c;
    vx_status status = VX_FAILURE;
    vx_char module[VX_INT_MAX_PATH];
    vx_uint32 m = 0;
    vx_publish_kernels_f publish = NULL;
    /* Resolve the default once so both the path and the recorded name use it
     * (the original dereferenced a possibly-NULL 'name' in strncpy below). */
    const vx_char *mod_name = (name ? name : "openvx-ext");

    /* Bounded, always-terminated formatting: the original sprintf could
     * overflow module[] for a long 'name'. */
    snprintf(module, sizeof(module), VX_MODULE_NAME("%s"), mod_name);

    if (vxIsValidContext(context) == vx_false_e)
    {
        VX_PRINT(VX_ZONE_ERROR, "Context is invalid!\n");
        return VX_ERROR_INVALID_REFERENCE;
    }

    /* Find the first free module slot; only one slot is ever consumed per call
     * (the unconditional break below ends the scan either way). */
    for (m = 0; m < VX_INT_MAX_MODULES; m++)
    {
        if (context->modules[m].handle == NULL)
        {
            context->modules[m].handle = vxLoadModule(module);
            if (context->modules[m].handle)
            {
                vx_symbol_t sym = vxGetSymbol(context->modules[m].handle, "vxPublishKernels");
                publish = (vx_publish_kernels_f)sym;
                if (publish == NULL)
                {
                    VX_PRINT(VX_ZONE_ERROR, "Failed to load symbol vxPublishKernels\n");
                    status = VX_ERROR_INVALID_MODULE;
                    vxUnloadModule(context->modules[m].handle);
                    context->modules[m].handle = NULL;
                }
                else
                {
                    VX_PRINT(VX_ZONE_INFO, "Calling %s publish function\n", module);
                    status = publish((vx_context)context);
                    if (status != VX_SUCCESS)
                    {
                        VX_PRINT(VX_ZONE_ERROR, "Failed to publish kernels in module\n");
                        vxUnloadModule(context->modules[m].handle);
                        context->modules[m].handle = NULL;
                    }
                    else
                    {
                        /* Record the module's base name; strncpy alone does not
                         * guarantee NUL termination, so terminate explicitly.
                         * NOTE(review): assumes modules[m].name holds at least
                         * VX_INT_MAX_PATH bytes (the original used that bound). */
                        strncpy(context->modules[m].name, mod_name, VX_INT_MAX_PATH - 1);
                        context->modules[m].name[VX_INT_MAX_PATH - 1] = 0;
                        /* NOTE(review): this counter is 'numMods' while the
                         * release path iterates 'num_modules' — verify these
                         * refer to the same field in vx_context_t. */
                        context->numMods++;
                    }
                }
            }
            else
            {
                VX_PRINT(VX_ZONE_ERROR, "Failed to find module %s in libraries path\n", module);
            }
            break;
        }
        else
        {
            VX_PRINT(VX_ZONE_CONTEXT, "module[%u] is used\n", m);
        }
    }

    if (status != VX_SUCCESS)
    {
        VX_PRINT(VX_ZONE_ERROR, "Failed to load module %s; error %d\n", module, status);
    }
    else
    {
        for (m = 0; m < context->numMods; m++)
        {
            VX_PRINT(VX_ZONE_INFO, "Module: %s\n", context->modules[m].name);
        }
    }
    return status;
}