/*
 * Queue a property change on an atomic modesetting request.
 *
 * Resolves the property name to its id on the given DRM object and, if found,
 * appends the (object, property, value) triple to the atomic request. Returns
 * false (logging an error for the add failure case) when the property does
 * not exist or libdrm rejects the addition; true on success.
 */
bool CDRMUtils::AddProperty(drmModeAtomicReqPtr req, struct drm_object *object, const char *name, uint64_t value)
{
  const uint32_t propertyId = GetPropertyId(object, name);
  if (propertyId == 0)
    return false;

  // drmModeAtomicAddProperty returns a negative errno on failure.
  const int ret = drmModeAtomicAddProperty(req, object->id, propertyId, value);
  if (ret < 0)
  {
    CLog::Log(LOGERROR, "CDRMUtils::%s - could not add property %s", __FUNCTION__, name);
    return false;
  }

  return true;
}
bool CDRMUtils::SetProperty(struct drm_object *object, const char *name, uint64_t value) { uint32_t property_id = GetPropertyId(object, name); if (!property_id) return false; if (drmModeObjectSetProperty(m_fd, object->id, object->type, property_id, value) < 0) { CLog::Log(LOGERROR, "CDRMUtils::%s - could not set property %s", __FUNCTION__, name); return false; } return true; }
// Invalidate the inline caches (store-field or proto caches, selected by
// isStoreField) for every property this handler knows about. Per-property
// invalidation involves dictionary lookups, so when the handler holds many
// properties we fall back to invalidating all caches wholesale instead.
void DynamicTypeHandler::InvalidateInlineCachesForAllProperties(ScriptContext* requestContext)
{
    const int propertyCount = GetPropertyCount();

    // Too many properties: bulk invalidation is cheaper than per-id lookups.
    if (propertyCount >= 128)
    {
        if (isStoreField)
        {
            requestContext->InvalidateAllStoreFieldCaches();
        }
        else
        {
            requestContext->InvalidateAllProtoCaches();
        }
        return;
    }

    // Small handler: invalidate each valid property id individually.
    for (int index = 0; index < propertyCount; index++)
    {
        const PropertyId propertyId = GetPropertyId(requestContext, static_cast<PropertyIndex>(index));
        if (propertyId == Constants::NoProperty)
        {
            continue;
        }

        if (isStoreField)
        {
            requestContext->InvalidateStoreFieldCaches(propertyId);
        }
        else
        {
            requestContext->InvalidateProtoCaches(propertyId);
        }
    }
}
// Create a new TypePath that shares this path's first pathLength assignments,
// so a different property can be added after the shared prefix (a "branch" in
// the type-transition tree). When SUPPORT_FIXED_FIELDS_ON_PATH_TYPES is
// enabled, fixed-field bookkeeping on both the original and the branched path
// is reconciled so the shared prefix stays consistent across both branches.
//
// recycler      - allocator used to create the new path.
// pathLength    - number of leading assignments to copy into the branch;
//                 must be strictly less than this path's current length.
// couldSeeProto - true if some prototype instance could also take this
//                 transition, forcing conservative fixed-field handling.
//
// Returns the newly allocated branched path.
TypePath * TypePath::Branch(Recycler * recycler, int pathLength, bool couldSeeProto)
{
    AssertMsg(pathLength < this->GetPathLength(), "Why are we branching at the tip of the type path?");

    // Ensure there is at least one free entry in the new path, so we can extend it.
    // TypePath::New will take care of aligning this appropriately.
    TypePath * branchedPath = TypePath::New(recycler, pathLength + 1);

    // Copy the shared prefix of assignments into the branched path.
    for (PropertyIndex i = 0; i < pathLength; i++)
    {
        branchedPath->AddInternal(assignments[i]);

#ifdef SUPPORT_FIXED_FIELDS_ON_PATH_TYPES
        if (couldSeeProto)
        {
            if (this->GetData()->usedFixedFields.Test(i))
            {
                // We must conservatively copy all used as fixed bits if some prototype instance could also take
                // this transition.  See comment in PathTypeHandlerBase::ConvertToSimpleDictionaryType.
                // Yes, we could devise a more efficient way of copying bits 1 through pathLength, if performance of this
                // code path proves important enough.
                branchedPath->GetData()->usedFixedFields.Set(i);
            }
            else if (this->GetData()->fixedFields.Test(i))
            {
                // We must clear any fixed fields that are not also used as fixed if some prototype instance could also take
                // this transition.  See comment in PathTypeHandlerBase::ConvertToSimpleDictionaryType.
                // NOTE(review): this mutates the ORIGINAL path, not the branch.
                this->GetData()->fixedFields.Clear(i);
            }
        }
#endif
    }

#ifdef SUPPORT_FIXED_FIELDS_ON_PATH_TYPES
    // When branching, we must ensure that fixed field values on the prefix shared by the two branches are always
    // consistent.  Hence, we can't leave any of them uninitialized, because they could later get initialized to
    // different values, by two different instances (one on the old branch and one on the new branch).  If that happened
    // and the instance from the old branch later switched to the new branch, it would magically gain a different set
    // of fixed properties!
    if (this->GetMaxInitializedLength() < pathLength)
    {
        this->SetMaxInitializedLength(pathLength);
    }
    branchedPath->SetMaxInitializedLength(pathLength);
#endif

#ifdef SUPPORT_FIXED_FIELDS_ON_PATH_TYPES
    // Verbose tracing of the fixed-field state of the original path after branching.
    if (PHASE_VERBOSE_TRACE1(FixMethodPropsPhase))
    {
        Output::Print(_u("FixedFields: TypePath::Branch: singleton: 0x%p(0x%p)\n"), PointerValue(this->singletonInstance), this->singletonInstance->Get());
        Output::Print(_u("   fixed fields:"));

        for (PropertyIndex i = 0; i < GetPathLength(); i++)
        {
            Output::Print(_u(" %s %d%d%d,"), GetPropertyId(i)->GetBuffer(),
                i < GetMaxInitializedLength() ? 1 : 0,
                GetIsFixedFieldAt(i, GetPathLength()) ? 1 : 0,
                GetIsUsedFixedFieldAt(i, GetPathLength()) ? 1 : 0);
        }

        Output::Print(_u("\n"));
    }
#endif

    return branchedPath;
}