// Allocates a new array instance of the given array type with 'length'
// elements. The element storage follows the uArray header in a single
// allocation. When 'optionalData' is non-null, its bytes are copied into
// the array and any references contained in the copied elements are
// retained (object elements directly, struct elements via uRetainStruct).
uArray* uArray::New(uType* type, int length, const void* optionalData)
{
    U_ASSERT(type && type->Type == uTypeTypeArray);

    uType* elementType = ((uArrayType*)type)->ElementType;
    size_t elementSize = elementType->ValueSize;

    // Header and element storage share one allocation; _ptr points just
    // past the header.
    uArray* result = (uArray*)uNew(type, sizeof(uArray) + elementSize * length);
    result->_ptr = (uint8_t*)result + sizeof(uArray);
    result->_length = length;

    if (optionalData)
    {
        uint8_t* data = (uint8_t*)result->Ptr();
        memcpy(data, optionalData, elementSize * length);

        // The raw copy above duplicated references without retaining them;
        // balance the ref-counts now.
        if (U_IS_OBJECT(elementType))
        {
            for (int i = 0; i < length; i++)
                uRetain(((uObject**)data)[i]);
        }
        else if (elementType->Flags & uTypeFlagsRetainStruct)
        {
            for (int i = 0; i < length; i++)
                uRetainStruct(elementType, data + elementSize * i);
        }
    }

    return result;
}
// Dynamically invokes this delegate with arguments unpacked from 'array'.
// Each element of 'array' must be an object reference (boxed for value
// types); the element is translated to the raw argument pointer the
// underlying function expects. Throws ICE if the array's element type is
// not an object type, and IOORE if the argument count does not match the
// delegate's parameter count. The return value (if any) is boxed.
uObject* uDelegate::Invoke(uArray* array)
{
    uDelegateType* type = (uDelegateType*)GetType();
    size_t count = type->ParameterCount;
    uType** params = type->ParameterTypes;
    void** args = NULL;

    if (array)
    {
        // The argument array must hold object references (boxed values).
        if (!U_IS_OBJECT(((uArrayType*)array->GetType())->ElementType))
            U_THROW_ICE();
        if (count != array->Length())
            U_THROW_IOORE();

        uObject** objects = (uObject**)array->Ptr();
        // Stack-allocate the argument-pointer vector; it only needs to
        // survive until the Invoke() call at the bottom of this function.
        void** ptr = args = count > 0 ? (void**)U_ALLOCA(count * sizeof(void*)) : NULL;

        for (size_t i = 0; i < count; i++)
        {
            uType* param = *params++;
            uObject* object = *objects++;

            switch (param->Type)
            {
            case uTypeTypeEnum:
            case uTypeTypeStruct:
                // Value-type parameter: pass a pointer to the boxed
                // payload located right after the object header.
                *ptr++ = (uint8_t*)object + sizeof(uObject);
                break;
            case uTypeTypeByRef:
                // By-ref to a value type: address of the boxed payload.
                // By-ref to a reference type: address of the local
                // 'object' slot.
                // NOTE(review): '&object' is the address of a loop-local
                // variable that is reused each iteration and goes out of
                // scope before Invoke() runs — this looks like it relies
                // on the compiler keeping one stable stack slot; confirm
                // intended byref-out semantics.
                *ptr++ = U_IS_VALUE(((uByRefType*)param)->ValueType)
                        ? (uint8_t*)object + sizeof(uObject)
                        : (void*)&object;
                break;
            case uTypeTypeClass:
            case uTypeTypeDelegate:
            case uTypeTypeInterface:
            case uTypeTypeArray:
                // Reference-type parameter: pass the reference itself.
                *ptr++ = object;
                break;
            default:
                U_FATAL();
            }
        }
    }

    uType* returnType = type->ReturnType;
    // Stack space for the raw return value, if the delegate returns one.
    void* retval = !U_IS_VOID(returnType) ? U_ALLOCA(returnType->ValueSize) : NULL;
    Invoke(retval, args, count);
    // Box the raw return value (no-op for void).
    return uBoxPtr(returnType, retval, NULL, true);
}
// Creates a new instance of this type using its registered default
// constructor. Throws ICE when the type is not an object (reference)
// type, and NRE when no constructor function pointer is registered.
// Generic types receive the concrete uType* as an extra first argument.
uObject* uType::New()
{
    if (!U_IS_OBJECT(this))
        U_THROW_ICE();
    if (!fp_ctor_)
        U_THROW_NRE();

    uObject* result;
    if (GenericCount > 0)
        ((void(*)(uType*, uObject**))fp_ctor_)(this, &result);
    else
        ((void(*)(uObject**))fp_ctor_)(&result);
    return result;
}
// Copies a value of 'type' from 'src' into 'dst' with correct reference
// counting. 'flags' selects the copy mode:
//   uCopyFlagsValue  - 'src' IS the object reference (not a pointer to one)
//   uCopyFlagsStrong - 'dst' is a retaining (uStrong) slot
// For retain-structs, existing references in 'dst' are released before
// the overwrite and the incoming references retained afterwards.
void uCopy(uType* type, const void* src, void* dst, uint8_t flags)
{
    // 'src' may only be null when the value itself is passed in 'src'
    // for an object type.
    U_ASSERT(type && dst && ((U_IS_OBJECT(type) && (flags & uCopyFlagsValue)) || src));

    if (U_IS_OBJECT(type))
    {
        if (flags == 0)
            *(uObject**)dst = *(uObject**)src;
        else if (flags == uCopyFlagsValue)
            *(uObject**)dst = (uObject*)src;
        else if (flags == uCopyFlagsStrong)
            *(uStrong<uObject*>*)dst = *(uObject**)src;
        else if (flags == uCopyFlagsStrongValue)
            *(uStrong<uObject*>*)dst = (uObject*)src;
        else
            U_FATAL();

        // Sanity: the stored reference (if any) must be of the stated type.
        U_ASSERT(!*(uObject**)dst || uIs(*(uObject**)dst, type));
    }
    else if (type->Flags & uTypeFlagsRetainStruct)
    {
        // Release what 'dst' currently holds, overwrite, then retain the
        // newly copied references — in exactly this order.
        uAutoReleaseStruct(type, dst);
        memcpy(dst, src, type->ValueSize);
        uRetainStruct(type, dst);
    }
    else
    {
        // Plain value type: a raw byte copy is sufficient.
        INLINE_MEMCPY(dst, src, type->ValueSize);
    }
}
void uArray::MarshalPtr(int index, const void* value, size_t size) { uType* type = ((uArrayType*)__type)->ElementType; void* item = (uint8_t*)_ptr + type->ValueSize * index; if (type->ValueSize == size) { INLINE_MEMCPY(item, value, size); if (U_IS_OBJECT(type)) uRetain(*(uObject**)item); } else { // Cast value back to correct type (or throw exception) // * small ints are promoted to 'int' when passed through '...' // * floats are promoted to 'double' when passed through '...' switch (size) { case sizeof(int): switch (type->ValueSize) { case 1: if (type == ::g::Uno::Byte_typeof()) return *(uint8_t*)item = (uint8_t)*(int*)value, void(); else if (type == ::g::Uno::SByte_typeof()) return *(int8_t*)item = (int8_t)*(int*)value, void(); else if (type == ::g::Uno::Bool_typeof()) return *(bool*)item = *(int*)value != 0, void(); break; case 2: if (type == ::g::Uno::Short_typeof()) return *(int16_t*)item = (int16_t)*(int*)value, void(); else if (type == ::g::Uno::UShort_typeof()) return *(uint16_t*)item = (uint16_t)*(int*)value, void(); else if (type == ::g::Uno::Char_typeof()) return *(uChar*)item = (uChar)*(int*)value, void(); break; } case sizeof(double): if (type == ::g::Uno::Float_typeof()) return *(float*)item = (float)*(double*)value, void(); break; } U_THROW_ICE(); } }
// Computes the alignment requirement for values of 'type'.
// Reference types align like a pointer; value types align to the
// strictest alignment among their instance fields (computed recursively).
// Empty or field-less types fall back to their value size, or 1.
static size_t uAlignOf(uType* type)
{
    if (U_IS_OBJECT(type))
        return sizeof(uObject*);

    // Take the maximum alignment over all non-static fields.
    size_t maxAlign = 0;
    for (size_t i = 0; i < type->FieldCount; i++)
    {
        const uFieldInfo& field = type->Fields[i];
        if (field.Flags & uFieldFlagsStatic)
            continue;

        size_t fieldAlign = uAlignOf(field.Type);
        if (fieldAlign > maxAlign)
            maxAlign = fieldAlign;
    }

    if (maxAlign > 0)
        return maxAlign;
    return type->ValueSize > 0 ? type->ValueSize : 1;
}
// Computes the memory layout for 'type': assigns field offsets, sizes the
// object/value footprint, and builds the reference tables used by the
// reference-counting machinery (Refs.Strong / Refs.Weak for instance
// fields, _StrongRefs / _WeakRefs for static fields). Runs in two passes:
// pass 1 counts references and lays out constrained static fields; pass 2
// assigns remaining offsets and records reference locations.
// No-op until the type is closed (all generic arguments bound).
void uBuildMemory(uType* type)
{
    U_ASSERT(type);

    // Open generic types cannot be laid out yet.
    if (!type->IsClosed())
        return;

    // objOffset: running end-of-field offset within an object instance
    //            (starts after the uObject header for reference types).
    // typeOffset: running size of storage for constrained static fields.
    size_t strongCount = 0,
           weakCount = 0,
           objOffset = U_IS_OBJECT(type) ? sizeof(uObject) : 0,
           typeOffset = 0;

    if (type->Base)
        type->Base->Build();

    // Pass 1: make sure field types are built, count strong/weak refs,
    // and lay out constrained static fields into 'typeOffset'.
    for (size_t i = 0; i < type->FieldCount; i++)
    {
        uFieldInfo& f = type->Fields[i];
        U_ASSERT(f.Type);

        // Build dependent value types (self-reference and object types
        // are excluded to avoid recursion / unnecessary work).
        if (f.Type != type && !U_IS_OBJECT(f.Type))
            f.Type->Build();

        if ((f.Flags & uFieldFlagsStatic) == 0)
        {
            // Unconstrained instance fields already have offsets; track
            // the end of the last one.
            if ((f.Flags & uFieldFlagsConstrained) == 0)
                objOffset = f.Offset + f.Type->ValueSize;

            if (U_IS_VALUE(f.Type))
            {
                // Inline struct: it contributes all of its own refs.
                strongCount += f.Type->Refs.StrongCount;
                weakCount += f.Type->Refs.WeakCount;
            }
            else if ((f.Flags & uFieldFlagsWeak) != 0)
                weakCount++;
            else
                strongCount++;
        }
        else if ((f.Flags & uFieldFlagsConstrained) != 0)
        {
            // Constrained static field: allocate space in the per-type
            // static storage block.
            uAlignField(typeOffset, f.Type);
            f.Offset = typeOffset;
            typeOffset += f.Type->ValueSize;
        }
    }

    // One allocation holds: static field storage (typeOffset bytes is
    // reserved via 'size'), followed by the strong then weak ref tables.
    size_t size = typeOffset + (strongCount + weakCount) * sizeof(uRefInfo<size_t>);
    uint8_t* ptr = (uint8_t*)malloc(size); // Leak
    memset(ptr, 0, size);
    type->Refs.Strong = (uRefInfo<size_t>*)ptr;
    ptr += strongCount * sizeof(uRefInfo<size_t>);
    type->Refs.Weak = (uRefInfo<size_t>*)ptr;
    ptr += weakCount * sizeof(uRefInfo<size_t>);
    // 'ptr' now points at the static field storage area.

    // Pass 2: finalize offsets and populate the reference tables.
    // (The counts are rebuilt incrementally from zero below — pass 1's
    // totals only sized the allocation; NOTE(review): this relies on
    // Refs.StrongCount/WeakCount starting at 0 here — confirm Build()
    // cannot run twice for the same type.)
    for (size_t i = 0; i < type->FieldCount; i++)
    {
#ifdef DEBUG_ARC
#define DEBUG_NAME ((Xli::String)type->FullName + "[" + (int)i + "]").CopyPtr(), // Leak
#else
#define DEBUG_NAME
#endif
        uFieldInfo& f = type->Fields[i];

        if ((f.Flags & uFieldFlagsStatic) == 0)
        {
            if ((f.Flags & uFieldFlagsConstrained) != 0)
            {
                // Assign an offset to this constrained instance field now.
                uAlignField(objOffset, f.Type);
                f.Flags &= ~uFieldFlagsConstrained;
                f.Offset = objOffset;
                objOffset += f.Type->ValueSize;
            }

            if (U_IS_VALUE(f.Type))
            {
                // Inline struct: splice its ref tables into ours,
                // rebased by this field's offset.
                f.Flags &= ~uFieldFlagsWeak;
                for (size_t j = 0; j < f.Type->Refs.StrongCount; j++)
                    type->Refs.Strong[type->Refs.StrongCount++] = f.Type->Refs.Strong[j] + f.Offset;
                for (size_t j = 0; j < f.Type->Refs.WeakCount; j++)
                    type->Refs.Weak[type->Refs.WeakCount++] = f.Type->Refs.Weak[j] + f.Offset;
            }
            else if ((f.Flags & uFieldFlagsWeak) != 0)
            {
                uRefInfo<size_t> ref = {DEBUG_NAME f.Offset};
                type->Refs.Weak[type->Refs.WeakCount++] = ref;
            }
            else
            {
                uRefInfo<size_t> ref = {DEBUG_NAME f.Offset};
                type->Refs.Strong[type->Refs.StrongCount++] = ref;
            }
        }
        else
        {
            if ((f.Flags & uFieldFlagsConstrained) != 0)
            {
                // Constrained static field: turn its relative offset into
                // an absolute address inside the static storage block.
                f.Flags &= ~uFieldFlagsConstrained;
                f.Offset += (uintptr_t)ptr;
            }

            // Static reference fields are registered globally so the
            // runtime can scan/release them.
            if ((f.Flags & uFieldFlagsWeak) != 0)
            {
                uRefInfo<uWeakObject**> ref = {DEBUG_NAME (uWeakObject**)f.Offset};
                _WeakRefs->Add(ref);
            }
            else if (U_IS_OBJECT(f.Type))
            {
                uRefInfo<uObject**> ref = {DEBUG_NAME (uObject**)f.Offset};
                _StrongRefs->Add(ref);
            }
        }
#undef DEBUG_NAME
    }

    // Finalize the type's sizes.
    if (U_IS_VALUE(type))
    {
        if (objOffset != 0)
        {
            uAlignField(objOffset, type);
            // A precomputed ValueSize must agree with the computed layout.
            U_ASSERT(type->ValueSize == objOffset || type->ValueSize == 0);
            type->ValueSize = objOffset;
        }
        // Boxed form = header + value payload.
        type->ObjectSize = sizeof(uObject) + type->ValueSize;
    }
    else
    {
        // Object size can never shrink below the base class's size.
        if (type->Base && type->Base->ObjectSize > objOffset)
            objOffset = type->Base->ObjectSize;
        if (objOffset > type->ObjectSize)
            type->ObjectSize = objOffset;
    }

#ifdef DEBUG_UNSAFE
    // Debug check: count how many fields cover each byte of the object.
    uint8_t* layout = (uint8_t*)U_ALLOCA(type->ObjectSize);
    memset(layout, 0, type->ObjectSize);
    for (size_t i = 0; i < type->FieldCount; i++)
    {
        uFieldInfo& f = type->Fields[i];
        if ((f.Flags & uFieldFlagsStatic) == 0)
        {
            for (size_t j = 0; j < f.Type->ValueSize; j++)
            {
                U_ASSERT(f.Offset + j < type->ObjectSize);
                layout[f.Offset + j]++;
            }
        }
    }
    // Verify that no fields are overlapping
    for (size_t i = 0; i < type->ObjectSize; i++)
        U_ASSERT(layout[i] < 2);
#endif
}