/* Writes a user buffer back into a distribution's backing store and releases
 * the external access claim. A NULL ptr releases the claim without copying.
 * Returns VX_SUCCESS on commit, VX_FAILURE if the reference is invalid or the
 * backing memory cannot be allocated. */
VX_API_ENTRY vx_status VX_API_CALL vxCommitDistribution(vx_distribution distribution, const void *ptr)
{
    vx_status status = VX_FAILURE;
    if ((vxIsValidSpecificReference(&distribution->base, VX_TYPE_DISTRIBUTION) == vx_true_e) &&
        (vxAllocateMemory(distribution->base.context, &distribution->memory) == vx_true_e))
    {
        if (ptr != NULL)
        {
            vxSemWait(&distribution->base.lock);
            /* Copy only when the user's pointer is not the backing store itself
             * (a mapped pointer needs no write-back). */
            if (ptr != distribution->memory.ptrs[0])
            {
                vx_size bytes = vxComputeMemorySize(&distribution->memory, 0);
                memcpy(distribution->memory.ptrs[0], ptr, bytes);
                VX_PRINT(VX_ZONE_INFO, "Copied distribution from %p to %p for "VX_FMT_SIZE" bytes\n", ptr, distribution->memory.ptrs[0], bytes);
            }
            vxSemPost(&distribution->base.lock);
            vxWroteToReference(&distribution->base);
        }
        /* Drop the external claim taken by the matching access call. */
        vxDecrementReference(&distribution->base, VX_EXTERNAL);
        status = VX_SUCCESS;
    }
    else
    {
        VX_PRINT(VX_ZONE_ERROR, "Not a valid object!\n");
    }
    return status;
}
/* Shrinks the logical item count of an array; the backing memory is kept.
 * Fails with VX_ERROR_INVALID_PARAMETERS if new_num_items would grow the array. */
VX_API_ENTRY vx_status VX_API_CALL vxTruncateArray(vx_array arr, vx_size new_num_items)
{
    vx_status status = VX_ERROR_INVALID_REFERENCE;
    if (vxIsValidArray(arr) == vx_true_e)
    {
        if (new_num_items > arr->num_items)
        {
            /* Truncation may only reduce (or keep) the item count. */
            status = VX_ERROR_INVALID_PARAMETERS;
        }
        else
        {
            arr->num_items = new_num_items;
            vxWroteToReference(&arr->base);
            status = VX_SUCCESS;
        }
    }
    return status;
}
/* Appends 'count' items read from 'ptr' to the end of the array.
 * stride == 0 or stride == item_size means the source is densely packed;
 * any larger stride selects item-by-item gather copies.
 * Returns VX_SUCCESS, or VX_ERROR_INVALID_REFERENCE / VX_ERROR_NO_MEMORY /
 * VX_ERROR_INVALID_PARAMETERS / VX_FAILURE (capacity exceeded). */
VX_API_ENTRY vx_status VX_API_CALL vxAddArrayItems(vx_array arr, vx_size count, const void *ptr, vx_size stride)
{
    vx_status status = VX_ERROR_INVALID_REFERENCE;
    if (vxIsValidArray(arr) == vx_true_e)
    {
        status = VX_ERROR_NO_MEMORY;
        if (vxAllocateArray(arr) == vx_true_e)
        {
            status = VX_ERROR_INVALID_PARAMETERS;
            if ((count > 0) && (ptr != NULL) && (stride == 0 || stride >= arr->item_size))
            {
                status = VX_FAILURE;
                /* Overflow-safe capacity check: num_items <= capacity is an
                 * invariant, so the subtraction cannot wrap, whereas
                 * num_items + count could wrap for a huge 'count' and falsely
                 * pass the check, leading to a heap overflow below. */
                if (count <= arr->capacity - arr->num_items)
                {
                    vx_size offset = arr->num_items * arr->item_size;
                    vx_uint8 *dst_ptr = &arr->memory.ptrs[0][offset];
                    if (stride == 0 || stride == arr->item_size)
                    {
                        /* Densely packed source: single bulk copy. */
                        memcpy(dst_ptr, ptr, count * arr->item_size);
                    }
                    else
                    {
                        /* Strided source: gather one item at a time. */
                        vx_size i;
                        const vx_uint8 *src = (const vx_uint8 *)ptr;
                        for (i = 0; i < count; ++i)
                        {
                            memcpy(&dst_ptr[i * arr->item_size], &src[i * stride], arr->item_size);
                        }
                    }
                    arr->num_items += count;
                    vxWroteToReference(&arr->base);
                    status = VX_SUCCESS;
                }
            }
        }
    }
    return status;
}
/* Writes a user-supplied 16-bit coefficient array into a convolution's
 * backing store and releases the external access claim.
 * A NULL 'array' releases the claim without copying.
 * Returns VX_SUCCESS, or VX_ERROR_INVALID_REFERENCE on a bad reference or
 * allocation failure. */
vx_status vxCommitConvolutionCoefficients(vx_convolution conv, vx_int16 *array)
{
    vx_convolution_t *convolution = (vx_convolution_t *)conv;
    vx_status status = VX_ERROR_INVALID_REFERENCE;
    if ((vxIsValidSpecificReference(&convolution->base.base, VX_TYPE_CONVOLUTION) == vx_true_e) &&
        (vxAllocateMemory(convolution->base.base.context, &convolution->base.memory) == vx_true_e))
    {
        vxSemWait(&convolution->base.base.lock);
        if (array)
        {
            /* Full matrix size: row stride times row count. */
            vx_size size = convolution->base.memory.strides[0][1] * convolution->base.memory.dims[0][1];
            memcpy(convolution->base.memory.ptrs[0], array, size);
        }
        vxSemPost(&convolution->base.base.lock);
        vxWroteToReference(&convolution->base.base);
        /* Pass VX_EXTERNAL to match the two-argument vxDecrementReference
         * signature used everywhere else in this file (the claim being
         * released here is the user's external one). */
        vxDecrementReference(&convolution->base.base, VX_EXTERNAL);
        status = VX_SUCCESS;
    }
    return status;
}
/* Stores the value pointed to by 'ptr' into the scalar, interpreting the
 * bytes according to the scalar's declared data_type.
 * Returns VX_ERROR_INVALID_REFERENCE for a bad reference,
 * VX_ERROR_INVALID_PARAMETERS for a NULL pointer, and
 * VX_ERROR_NOT_SUPPORTED for an unhandled data type. */
VX_API_ENTRY vx_status VX_API_CALL vxCommitScalarValue(vx_scalar scalar, void *ptr)
{
    vx_status result = VX_SUCCESS;
    if (vxIsValidSpecificReference(&scalar->base, VX_TYPE_SCALAR) == vx_false_e)
    {
        return VX_ERROR_INVALID_REFERENCE;
    }
    if (ptr == NULL)
    {
        return VX_ERROR_INVALID_PARAMETERS;
    }
    vxSemWait(&scalar->base.lock);
    /* Reinterpret the caller's buffer per the scalar's own type tag. */
    switch (scalar->data_type)
    {
        case VX_TYPE_CHAR:
            scalar->data.chr = *(vx_char *)ptr;
            break;
        case VX_TYPE_INT8:
            scalar->data.s08 = *(vx_int8 *)ptr;
            break;
        case VX_TYPE_UINT8:
            scalar->data.u08 = *(vx_uint8 *)ptr;
            break;
        case VX_TYPE_INT16:
            scalar->data.s16 = *(vx_int16 *)ptr;
            break;
        case VX_TYPE_UINT16:
            scalar->data.u16 = *(vx_uint16 *)ptr;
            break;
        case VX_TYPE_INT32:
            scalar->data.s32 = *(vx_int32 *)ptr;
            break;
        case VX_TYPE_UINT32:
            scalar->data.u32 = *(vx_uint32 *)ptr;
            break;
        case VX_TYPE_INT64:
            scalar->data.s64 = *(vx_int64 *)ptr;
            break;
        case VX_TYPE_UINT64:
            scalar->data.u64 = *(vx_uint64 *)ptr;
            break;
#if OVX_SUPPORT_HALF_FLOAT
        case VX_TYPE_FLOAT16:
            scalar->data.f16 = *(vx_float16 *)ptr;
            break;
#endif
        case VX_TYPE_FLOAT32:
            scalar->data.f32 = *(vx_float32 *)ptr;
            break;
        case VX_TYPE_FLOAT64:
            scalar->data.f64 = *(vx_float64 *)ptr;
            break;
        case VX_TYPE_DF_IMAGE:
            scalar->data.fcc = *(vx_df_image *)ptr;
            break;
        case VX_TYPE_ENUM:
            scalar->data.enm = *(vx_enum *)ptr;
            break;
        case VX_TYPE_SIZE:
            scalar->data.size = *(vx_size *)ptr;
            break;
        case VX_TYPE_BOOL:
            scalar->data.boolean = *(vx_bool *)ptr;
            break;
        default:
            VX_PRINT(VX_ZONE_ERROR, "some case is not covered in %s\n", __FUNCTION__);
            result = VX_ERROR_NOT_SUPPORTED;
            break;
    }
    vxPrintScalarValue(scalar);
    vxSemPost(&scalar->base.lock);
    vxWroteToReference(&scalar->base);
    return result;
}
/* Sets a threshold attribute (value, lower/upper bound, or type).
 * VALUE only applies to BINARY thresholds; LOWER/UPPER only to RANGE ones.
 * Returns VX_SUCCESS, VX_ERROR_INVALID_PARAMETERS on a size/type mismatch,
 * VX_ERROR_NOT_SUPPORTED for an unknown attribute, or
 * VX_ERROR_INVALID_REFERENCE for a bad reference. */
vx_status vxSetThresholdAttribute(vx_threshold t, vx_enum attribute, void *ptr, vx_size size)
{
    vx_status status = VX_SUCCESS;
    vx_threshold_t *threshold = (vx_threshold_t *)t;
    if (vxIsValidSpecificReference(&threshold->base, VX_TYPE_THRESHOLD) != vx_true_e)
    {
        status = VX_ERROR_INVALID_REFERENCE;
        VX_PRINT(VX_ZONE_API, "return %d\n", status);
        return status;
    }
    switch (attribute)
    {
        case VX_THRESHOLD_ATTRIBUTE_VALUE:
            /* Single value is only meaningful for binary thresholds. */
            if (VX_CHECK_PARAM(ptr, size, vx_uint8, 0x0) && (threshold->type == VX_THRESHOLD_TYPE_BINARY))
            {
                threshold->value = *(vx_uint8 *)ptr;
                vxWroteToReference(&threshold->base);
            }
            else
            {
                status = VX_ERROR_INVALID_PARAMETERS;
            }
            break;
        case VX_THRESHOLD_ATTRIBUTE_LOWER:
            /* Bounds are only meaningful for range thresholds. */
            if (VX_CHECK_PARAM(ptr, size, vx_uint8, 0x0) && (threshold->type == VX_THRESHOLD_TYPE_RANGE))
            {
                threshold->lower = *(vx_uint8 *)ptr;
                vxWroteToReference(&threshold->base);
            }
            else
            {
                status = VX_ERROR_INVALID_PARAMETERS;
            }
            break;
        case VX_THRESHOLD_ATTRIBUTE_UPPER:
            if (VX_CHECK_PARAM(ptr, size, vx_uint8, 0x0) && (threshold->type == VX_THRESHOLD_TYPE_RANGE))
            {
                threshold->upper = *(vx_uint8 *)ptr;
                vxWroteToReference(&threshold->base);
            }
            else
            {
                status = VX_ERROR_INVALID_PARAMETERS;
            }
            break;
        case VX_THRESHOLD_ATTRIBUTE_TYPE:
            if (VX_CHECK_PARAM(ptr, size, vx_enum, 0x3))
            {
                vx_enum new_type = *(vx_enum *)ptr;
                if (vxIsValidThresholdType(new_type) == vx_true_e)
                {
                    threshold->type = new_type;
                }
                else
                {
                    status = VX_ERROR_INVALID_PARAMETERS;
                }
            }
            else
            {
                status = VX_ERROR_INVALID_PARAMETERS;
            }
            break;
        default:
            status = VX_ERROR_NOT_SUPPORTED;
            break;
    }
    VX_PRINT(VX_ZONE_API, "return %d\n", status);
    return status;
}
/* Internal write-back for a [start, end) array range previously obtained via
 * an access call. Handles four cases: a zero-area commit (end == 0), a
 * framework-allocated intermediate buffer (found via vxFindAccessor), a
 * mapped pointer inside the array's own storage (no copy needed), and a
 * plain user buffer (straight copy). Always drops the external reference.
 * Returns VX_SUCCESS, VX_ERROR_INVALID_PARAMETERS on a bad range/pointer,
 * or VX_ERROR_OPTIMIZED_AWAY for an inaccessible virtual array. */
vx_status vxCommitArrayRangeInt(vx_array arr, vx_size start, vx_size end, const void *ptr)
{
    vx_status status = VX_ERROR_INVALID_REFERENCE;
    vx_bool external = vx_true_e; // assume that it was an allocated buffer
    if ((ptr == NULL) || (start > end) || (end > arr->num_items))
    {
        return VX_ERROR_INVALID_PARAMETERS;
    }
    /* determine if virtual before checking for memory */
    if (arr->base.is_virtual == vx_true_e)
    {
        if (arr->base.is_accessible == vx_false_e)
        {
            /* User tried to access a "virtual" array. */
            VX_PRINT(VX_ZONE_ERROR, "Can not access a virtual array\n");
            return VX_ERROR_OPTIMIZED_AWAY;
        }
        /* framework trying to access a virtual image, this is ok. */
    }
    /* VARIABLES:
     * 1.) ZERO_AREA
     * 2.) CONSTANT - independent
     * 3.) INTERNAL - independent of area
     * 4.) EXTERNAL - dependent on area (do nothing on zero, determine on non-zero)
     * 5.) !INTERNAL && !EXTERNAL == MAPPED
     */
    {
        /* check to see if the range is zero area */
        vx_bool zero_area = (end == 0) ? vx_true_e : vx_false_e;
        vx_uint32 index = UINT32_MAX; // out of bounds, if given to remove, won't do anything
        vx_bool internal = vxFindAccessor(arr->base.context, ptr, &index);
        if (zero_area == vx_false_e)
        {
            /* this could be a write-back */
            if (internal == vx_true_e && arr->base.context->accessors[index].usage == VX_READ_ONLY)
            {
                /* this is a buffer that we allocated on behalf of the user and
                 * now they are done. Do nothing else */
                vxRemoveAccessor(arr->base.context, index);
            }
            else
            {
                vx_uint8 *beg_ptr = arr->memory.ptrs[0];
                vx_uint8 *end_ptr = &beg_ptr[arr->item_size * arr->num_items];
                if ((beg_ptr <= (vx_uint8 *)ptr) && ((vx_uint8 *)ptr < end_ptr))
                {
                    /* the pointer is contained in the array, so it was mapped,
                     * thus there's nothing else to do. */
                    external = vx_false_e;
                }
                if (external == vx_true_e || internal == vx_true_e)
                {
                    /* the pointer was not mapped, copy. */
                    vx_size offset = start * arr->item_size;
                    vx_size len = (end - start) * arr->item_size;
                    if (internal == vx_true_e)
                    {
                        vx_size stride = *(vx_size *)arr->base.context->accessors[index].extra_data;
                        if (stride == arr->item_size)
                        {
                            memcpy(&beg_ptr[offset], ptr, len);
                        }
                        else
                        {
                            /* vx_size index: 'int' would be a signed/unsigned
                             * mismatch against 'end' and could truncate large
                             * ranges. */
                            vx_size i;
                            const vx_uint8 *pSrc;
                            vx_uint8 *pDest;
                            for (i = start, pSrc = ptr, pDest = &beg_ptr[offset];
                                 i < end;
                                 i++, pSrc += stride, pDest += arr->item_size)
                            {
                                memcpy(pDest, pSrc, arr->item_size);
                            }
                        }
                        /* a write only or read/write copy */
                        vxRemoveAccessor(arr->base.context, index);
                    }
                    else
                    {
                        memcpy(&beg_ptr[offset], ptr, len);
                    }
                }
                vxWroteToReference(&arr->base);
            }
            vxSemPost(&arr->memory.locks[0]);
            status = VX_SUCCESS;
        }
        else
        {
            /* could be RO|WO|RW where they decided not to commit anything. */
            if (internal == vx_true_e) // RO
            {
                vxRemoveAccessor(arr->base.context, index);
            }
            else // RW|WO
            {
                vxSemPost(&arr->memory.locks[0]);
            }
            status = VX_SUCCESS;
        }
        vxDecrementReference(&arr->base, VX_EXTERNAL);
    }
    return status;
}