void KafkaCluster::InitCluster(const std::vector<BrokerAddress>& brokerList) {
  if ( brokerList.size() == 0 ) {
    throw std::invalid_argument("Kafka broker list is empty!");
  }

  // Use the first reachable seed broker to bootstrap the cluster metadata.
  for ( uint32_t i = 0; i < brokerList.size(); ++i ) {
    MetaRequest req;
    MetaResponsePtr rsp;
    KafkaBroker tempBroker(brokerList[i].first, brokerList[i].second, 0);
    rsp = tempBroker.FetchMeta(req);
    if ( !rsp.get() ) {
      LOG_WARN("Broker is not available: " << brokerList[i].first << ":" << brokerList[i].second);
      continue;
    }

    // Size the broker table so that it can be indexed directly by node id.
    int32_t maxNodeId = 0;
    for ( std::list<MetaResponse::Broker>::const_iterator it = rsp->brokerMeta.begin();
          it != rsp->brokerMeta.end(); ++it ) {
      if ( it->nodeId > maxNodeId ) {
        maxNodeId = it->nodeId;
      }
    }
    m_brokers.assign(maxNodeId + 1, KafkaBrokerPtr());

    for ( std::list<MetaResponse::Broker>::const_iterator it = rsp->brokerMeta.begin();
          it != rsp->brokerMeta.end(); ++it ) {
      KafkaBrokerPtr pb(new KafkaBroker(it->host, it->port, it->nodeId));
      m_brokers[it->nodeId] = pb;
      LOG_INFO("Init broker: " << it->host << ":" << it->port << ", node id " << it->nodeId);
    }

    // Build the topic/partition map and fetch the initial offsets for each topic.
    for ( std::list<MetaResponse::TopicMetadata>::const_iterator it = rsp->topicMeta.begin();
          it != rsp->topicMeta.end(); ++it ) {
      KafkaTopicPtr tp(new KafkaTopic(it->name, it->partitionMeta.size()));
      for ( std::list<MetaResponse::TopicMetadata::PartitionMetadata>::const_iterator pit = it->partitionMeta.begin();
            pit != it->partitionMeta.end(); ++pit ) {
        KafkaPartition partition(pit->partitionId, pit->leader, 0);
        tp->SetPartition(partition);
        LOG_INFO("Init partition: topic " << it->name << ", partition id " << pit->partitionId
                 << ", leader " << pit->leader);
      }
      m_topics[tp->GetName()] = tp;
      UpdateOffset(*tp);
    }
    break;
  }

  if ( !m_brokers.size() ) {
    LOG_ERROR("No kafka broker is available!");
    throw std::invalid_argument("No kafka broker works!");
  }
}
void
FilterNodeConvolveD2D1::SetAttribute(uint32_t aIndex, const IntPoint &aValue)
{
  if (aIndex != ATT_CONVOLVE_MATRIX_TARGET) {
    MOZ_ASSERT(false);
    return;
  }

  mTarget = aValue;

  UpdateOffset();
}
void
FilterNodeConvolveD2D1::SetAttribute(uint32_t aIndex, const IntSize &aValue)
{
  if (aIndex != ATT_CONVOLVE_MATRIX_KERNEL_SIZE) {
    MOZ_ASSERT(false);
    return;
  }

  mKernelSize = aValue;

  mEffect->SetValue(D2D1_CONVOLVEMATRIX_PROP_KERNEL_SIZE_X, aValue.width);
  mEffect->SetValue(D2D1_CONVOLVEMATRIX_PROP_KERNEL_SIZE_Y, aValue.height);

  UpdateOffset();
}
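// UpdateOffset() itself is not part of this listing. Below is a minimal sketch of
// what it plausibly computes, assuming the D2D1 convolve-matrix effect takes the
// kernel offset as a single D2D1_VECTOR_2F derived from the kernel size and the
// target cell; this is an assumption for illustration, not the confirmed
// implementation.
void
FilterNodeConvolveD2D1::UpdateOffset()
{
  // Center the kernel, then shift it by the target point supplied via
  // ATT_CONVOLVE_MATRIX_TARGET.
  D2D1_VECTOR_2F offset =
      D2D1::Vector2F((Float(mKernelSize.width) - 1) / 2.0f - Float(mTarget.x),
                     (Float(mKernelSize.height) - 1) / 2.0f - Float(mTarget.y));
  mEffect->SetValue(D2D1_CONVOLVEMATRIX_PROP_KERNEL_OFFSET, offset);
}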
FetchResponsePtr KafkaCluster::FetchMessage(const std::string& topic, bool fromEnding) {
  if ( m_topics.find(topic) == m_topics.end() ) {
    return FetchResponsePtr();
  }

  uint32_t brokers = m_brokers.size();
  std::tr1::shared_ptr<FetchRequest::TopicRequest> trequest(
      new FetchRequest::TopicRequest[brokers],
      ArrayDeleter<FetchRequest::TopicRequest>());

  // Group the topic's partitions by their leader broker.
  KafkaTopicPtr pt = m_topics.find(topic)->second;
  uint32_t num = pt->GetPartitionNum();
  for ( uint32_t i = 0; i < num; ++i ) {
    uint32_t id = pt->GetBrokerLeader(i);
    if ( id >= brokers ) {
      continue;
    }
    trequest.get()[id].partition.push_back(
        FetchRequest::TopicRequest::PartitionRequest(i, fromEnding ? pt->GetOffset(i) : 0));
  }

  // Send one fetch request per broker that leads at least one partition and
  // concatenate the per-broker responses.
  FetchResponsePtr response(new FetchResponse());
  for ( uint32_t i = 0; i < brokers; ++i ) {
    if ( trequest.get()[i].partition.size() > 0 ) {
      trequest.get()[i].name = topic;
      FetchRequest request;
      request.topic.push_back(trequest.get()[i]);
      FetchResponsePtr rsp = m_brokers[i]->FetchMessage(request);
      if ( !rsp.get() ) {
        continue;  // broker did not answer; skip it instead of dereferencing a null response
      }
      UpdateOffset(*pt, *rsp);
      response->splice(response->end(), *rsp);
    }
  }
  return response;
}
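// Usage sketch for the KafkaCluster entry points shown above. The wrapper
// function, host names, topic name, and the default-constructed KafkaCluster are
// hypothetical, and BrokerAddress is assumed to be a std::pair<std::string,
// uint16_t> (consistent with the .first/.second access in InitCluster(), but not
// confirmed by this listing).
void ConsumeFromEnd() {
  std::vector<BrokerAddress> seeds;
  seeds.push_back(BrokerAddress("kafka-1.example.com", 9092));
  seeds.push_back(BrokerAddress("kafka-2.example.com", 9092));

  KafkaCluster cluster;
  cluster.InitCluster(seeds);  // throws std::invalid_argument if no seed broker responds

  // true: fetch from the offsets recorded via UpdateOffset(); false: fetch from offset 0.
  FetchResponsePtr messages = cluster.FetchMessage("my-topic", true);
}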
virtual void ProcessEvent( EFlowEvent event, SActivationInfo *pActInfo )
{
	CFlowEntityNodeBase::ProcessEvent(event, pActInfo);

	switch (event)
	{
	case eFE_Activate:
		{
			if (IsPortActive(pActInfo, eIP_Attach))
			{
				AttachObject(pActInfo);
			}
			if (IsPortActive(pActInfo, eIP_Detach))
			{
				DetachObject(pActInfo);
			}
			if (IsPortActive(pActInfo, eIP_Hide))
			{
				HideAttachment(pActInfo);
			}
			else if (IsPortActive(pActInfo, eIP_UnHide))
			{
				UnHideAttachment(pActInfo);
			}
			if (IsPortActive(pActInfo, eIP_RotationOffset) || IsPortActive(pActInfo, eIP_TranslationOffset))
			{
				UpdateOffset(pActInfo);
			}
			break;
		}
	case eFE_Initialize:
		if (gEnv->IsEditor() && m_entityId)
		{
			UnregisterEvent(m_event);
			RegisterEvent(m_event);
			m_pNewAttachment = NULL;
		}
		break;
	}
}
void KafkaCluster::UpdateOffset(KafkaTopic& topic) {
  uint32_t num = topic.GetPartitionNum();
  uint32_t brokers = m_brokers.size();
  if ( num == 0 ) {
    return;
  }

  std::tr1::shared_ptr<OffsetRequest::TopicRequest> trequest(
      new OffsetRequest::TopicRequest[brokers],
      ArrayDeleter<OffsetRequest::TopicRequest>());

  // Group the topic's partitions by their leader broker.
  for ( uint32_t i = 0; i < num; ++i ) {
    uint32_t id = topic.GetBrokerLeader(i);
    if ( id >= brokers ) {
      continue;
    }
    trequest.get()[id].partition.push_back(OffsetRequest::TopicRequest::PartitionRequest(i));
  }

  // Query each leader for the current offsets and record them on the topic.
  for ( uint32_t i = 0; i < brokers; ++i ) {
    if ( trequest.get()[i].partition.size() > 0 ) {
      trequest.get()[i].name = topic.GetName();
      OffsetRequest request;
      request.topic.push_back(trequest.get()[i]);
      OffsetResponsePtr response = m_brokers[i]->FetchOffset(request);
      if ( response.get() ) {
        UpdateOffset(topic, *response);
      }
    }
  }
}
void AttachObject(SActivationInfo *pActInfo)
{
	if (pActInfo->pEntity)
	{
		EntityId entityId = GetPortEntityId(pActInfo, eIP_EntityId);
		IEntity *pEntity = gEnv->pEntitySystem->GetEntity(entityId);
		if (pEntity)
		{
			// Keep track of the last attachment performed so the state can be properly reset at the end
			m_pNewAttachment = GetAttachment(pActInfo);
			if (m_pNewAttachment)
			{
				CEntityAttachment *pEntityAttachment = new CEntityAttachment;
				pEntityAttachment->SetEntityId(entityId);
				m_pNewAttachment->AddBinding(pEntityAttachment);
				UpdateOffset(pActInfo);
				ActivateOutput(pActInfo, eOP_Attached, 0);
			}
		}
	}
}