void MembershipTableMgr::handleNormalJoinEvent(const MemberWrapper& member) {
  function_footprint();
  queue.push(member);
  if (joining) {
    DVLOG(0) << "$$$$$$$$$$$$$$$$$$$$$$$$$$";
    DVLOG(0) << "$$$$$$$$$$$$$$$$$$$$$$$$$$";
    DVLOG(0) << "current joining member queue size: " << queue.unsafe_size();
    DVLOG(0) << "$$$$$$$$$$$$$$$$$$$$$$$$$$";
    DVLOG(0) << "$$$$$$$$$$$$$$$$$$$$$$$$$$";
    return;
  }
  while (!queue.empty()) {
    joining.store(true);
    MemberWrapper joined_member;
    queue.try_pop(joined_member);

    // leader generates the whole member table and sends it to the joining member.
    auto wholeMemberTable = std::make_shared<WholeMembershipTableEvent>();
    genMembershipTable(*wholeMemberTable->mutable_table());
    ResultCode rs;
    rs = multicastMemberMessage(WHOLE_MEMBERSHIP_TABLE, wholeMemberTable);
    if (rs != RC_SUCCESS) {
      LOG(ERROR) << getErrorDescription(rs);
      return;
    }

    // leader sends the delta member and its join position to all members.
    auto deltaMember = std::make_shared<DeltaMemberEvent>();
    deltaMember->set_position(findAddPos());
    deltaMember->mutable_member()->CopyFrom(joined_member.getMember());
    rs = multicastMemberMessage(DELTA_MEMBER_AND_JOIN_POSITION, deltaMember);
    if (rs != RC_SUCCESS) {
      LOG(ERROR) << getErrorDescription(rs);
      return;
    }
  }
}
void serialize( Archive & ar, const unsigned int version )
{
    DVLOG(2) << "[BDF::serialize] serialize BDFBase\n";
#if 0
    ar & M_order;
    ar & M_name;
    ar & M_time;
    ar & M_n_restart;
    ar & M_Tf;
#endif
    //ar & M_time_orders;
    ar & boost::serialization::make_nvp( "time_values", M_time_values_map );

    //DVLOG(2) << "[BDF::serialize] time orders size: " << M_time_orders.size() << "\n";
    DVLOG(2) << "[BDF::serialize] time values size: " << M_time_values_map.size() << "\n";

    for ( auto it = M_time_values_map.begin(), en = M_time_values_map.end(); it != en; ++it )
    {
        //LOG(INFO) << "[Bdf] order " << i << "=" << M_time_orders[i] << "\n";
        DVLOG(2) << "[Bdf::serialize] value " << *it << "\n";
    }

    DVLOG(2) << "[BDF::serialize] serialize BDFBase done\n";
}
A_Entry GlobalQueue<T>::push_reserve ( bool ignore ) {
  CHECK( isMaster() );

  Grappa::Metrics::global_queue_stats.record_push_reserve_reply( Grappa_sizeof_delegate_func_reply< bool, A_Entry >() );

  DVLOG(5) << "push_reserve";

  CHECK( capacity > 0 );
  if ( (tail % capacity == head % capacity) && (tail != head) ) {
    return make_global( static_cast< QueueEntry<T> * >( NULL ) ); // no room
  } else {
    A_Entry assigned = queueBase + (tail % capacity);
    tail++;

    // if there are any consumers, wake oldest and give the address just produced
    if ( pullReserveWaiters.size() > 0 ) {
      CHECK( head == tail-1 ) << "Size should be exactly one, since there are waiters and one value was just produced";
      DVLOG(5) << "push_reserve: found waiters";
      A_Entry granted = assigned;
      head++;

      A_D_A_Entry w = pullReserveWaiters.front();
      pullReserveWaiters.pop();
      pull_reserve_sendreply( w, &granted, true );
    }

    return assigned;
  }
}
static void UpdateComponent( spaceT const& Xh, Epetra_MultiVector& sol, Epetra_MultiVector& comp )
{
    Epetra_Map componentMap ( epetraMap( Xh->template functionSpace<index>()->map() ) );
    Epetra_Map globalMap ( epetraMap( Xh->map() ) );

    int shift = Xh->nDofStart( index );
    int Length = comp.MyLength();

    for ( int i=0; i < Length; i++ )
    {
        int compGlobalID = componentMap.GID( i );

        if ( compGlobalID >= 0 )
        {
            int compLocalID = componentMap.LID( compGlobalID );

            int localID = globalMap.LID( compGlobalID+shift );
            // int globalID = globalMap.GID(localID);

            DVLOG(2) << "Copy entry component[" << compLocalID << "] to sol[" << localID << "]=" << sol[0][localID] << "\n";

            sol[0][localID] = comp[0][compLocalID];

            DVLOG(2) << comp[0][compLocalID] << "\n";
        }
    }
}
void StoreDelegateRddActor::handleRddCreate(const ActorMessagePtr& msg) {
  DVLOG(2) << "StoreDelegateRddActor : handle create store delegate.";
  rawMsg = msg;

  CreateDelegateRddRequest* request = dynamic_cast<CreateDelegateRddRequest*>(msg->getPayload().get());
  idgs::store::MetadataHelper::loadStoreMetadata(request->store_name(), metadata.get());

  ClusterFramework& cluster = ::idgs::util::singleton<ClusterFramework>::getInstance();
  for (int32_t partition = 0; partition < partitionSize; ++partition) {
    int32_t memberId = cluster.getPartitionManager()->getPartition(partition)->getPrimaryMemberId();

    shared_ptr<CreateDelegatePartitionRequest> payload(new CreateDelegatePartitionRequest);
    payload->set_store_name(request->store_name());
    payload->set_partition(partition);
    payload->set_rdd_name(getRddName());

    ActorMessagePtr reqMsg = createActorMessage();
    reqMsg->setOperationName(CREATE_DELEGATE_PARTITION);
    reqMsg->setDestActorId(RDD_SERVICE_ACTOR);
    reqMsg->setDestMemberId(memberId);
    reqMsg->setPayload(payload);

    DVLOG(3) << "RDD \"" << getRddName() << "\" sending create RDD partition to member " << memberId;
    ::idgs::actor::postMessage(reqMsg);
  }
}
bool GlobalQueue<T>::push( GlobalAddress<T> chunk_base, uint64_t chunk_amount ) {
  CHECK( initialized );
  DVLOG(5) << "push() base:" << chunk_base << " amount:" << chunk_amount;

  GlobalAddress< QueueEntry<T> > loc = Grappa_delegate_func< bool, GlobalAddress< QueueEntry<T> >, GlobalQueue<T>::push_reserve_g > ( false, HOME_NODE );
  size_t msg_bytes = Grappa_sizeof_delegate_func_request< bool, GlobalAddress< QueueEntry<T> > >( );
  DVLOG(5) << "push() reserve done -- loc:" << loc;

  if ( loc.pointer() == NULL ) {
    Grappa::Metrics::global_queue_stats.record_push_reserve_request( msg_bytes, false );
    // no space in global queue; push failed
    return false;
  }

  Grappa::Metrics::global_queue_stats.record_push_reserve_request( msg_bytes, true );

  // push the queue entry that points to my chunk
  ChunkInfo<T> c;
  c.base = chunk_base;
  c.amount = chunk_amount;

  push_entry_args<T> entry_args;
  entry_args.target = loc;
  entry_args.chunk = c;

  DVLOG(5) << "push() sending entry to " << loc;
  bool had_sleeper = Grappa_delegate_func< push_entry_args<T>, bool, GlobalQueue<T>::push_entry_g > ( entry_args, loc.core() );
  size_t entry_msg_bytes = Grappa_sizeof_delegate_func_request< push_entry_args<T>, bool >( );
  Grappa::Metrics::global_queue_stats.record_push_entry_request( entry_msg_bytes, had_sleeper );

  return true;
}
typename BackendPetsc<T>::solve_return_type
BackendPetsc<T>::solve( sparse_matrix_type const& A,
                        vector_type& x,
                        vector_type const& b )
{
    M_solver_petsc.setPrefix( this->prefix() );
    M_solver_petsc.setPreconditionerType( this->pcEnumType() );
    M_solver_petsc.setSolverType( this->kspEnumType() );

    if ( !M_solver_petsc.initialized() )
        M_solver_petsc.attachPreconditioner( this->M_preconditioner );

    M_solver_petsc.setConstantNullSpace( this->hasConstantNullSpace() );
    M_solver_petsc.setFieldSplitType( this->fieldSplitEnumType() );
    M_solver_petsc.setTolerances( _rtolerance=this->rTolerance(),
                                  _atolerance=this->aTolerance(),
                                  _dtolerance=this->dTolerance(),
                                  _maxit = this->maxIterations() );
    M_solver_petsc.setPrecMatrixStructure( this->precMatrixStructure() );
    M_solver_petsc.setMatSolverPackageType( this->matSolverPackageEnumType() );
    M_solver_petsc.setShowKSPMonitor( this->showKSPMonitor() );
    M_solver_petsc.setShowKSPConvergedReason( this->showKSPConvergedReason() );

    auto res = M_solver_petsc.solve( A, x, b, this->rTolerance(), this->maxIterations() );
    DVLOG(2) << "[BackendPetsc::solve] number of iterations : " << res.template get<1>() << "\n";
    DVLOG(2) << "[BackendPetsc::solve] residual : " << res.template get<2>() << "\n";

    if ( !res.template get<0>() )
        LOG(ERROR) << "Backend " << this->prefix() << " : linear solver failed to converge" << std::endl;

    return res;
} // BackendPetsc::solve
/// Mark a certain number of things completed. When the global count on all cores goes to 0, all
/// tasks waiting on the GCE will be woken.
///
/// Note: this can be called in a message handler (e.g. remote completes from stolen tasks).
void complete(int64_t dec = 1) {
  count -= dec;
  DVLOG(4) << "complete (" << count << ") -- gce(" << this << ")";

  // out of work here
  if (count == 0) { // count[dec -> 0]
    // enter cancellable barrier
    send_heap_message(master_core, [this] {
      cores_out--;
      DVLOG(4) << "core entered barrier (cores_out:" << cores_out << ")";

      // if all are in
      if (cores_out == 0) { // cores_out[1 -> 0]
        CHECK_EQ(count, 0);
        // notify everyone to wake
        for (Core c = 0; c < cores(); c++) {
          send_heap_message(c, [this] {
            CHECK_EQ(count, 0);
            DVLOG(3) << "broadcast";
            broadcast(&cv); // wake anyone who was waiting here
            reset(); // reset, now anyone else calling `wait` should fall through
          });
        }
      }
    });
  }
}
void try_merge_buddy_recursive( ChunkMap::iterator cmit ) {
  // compute address of buddy
  intptr_t address = cmit->second.address;
  intptr_t buddy_address = (address ^ cmit->second.size);
  DVLOG(5) << cmit->second << " buddy address " << (void *) buddy_address;

  // does it exist?
  ChunkMap::iterator buddy_iterator = chunks_.find( buddy_address );
  if( buddy_iterator != chunks_.end() &&
      buddy_iterator->second.size == cmit->second.size &&
      buddy_iterator->second.in_use == false ) {
    DVLOG(5) << "buddy found! address " << (void *) address << " buddy address " << (void *) buddy_address;

    // remove the higher-addressed chunk
    ChunkMap::iterator higher_iterator = address < buddy_address ? buddy_iterator : cmit;
    remove_from_free_list( higher_iterator );
    chunks_.erase( higher_iterator );

    // keep the lower-addressed chunk in the map:
    // update its size and move it to the right free list
    ChunkMap::iterator lower_iterator = address < buddy_address ? cmit : buddy_iterator;
    remove_from_free_list( lower_iterator ); // should these be swapped? I think so.
    lower_iterator->second.size *= 2;
    add_to_free_list( lower_iterator );

    // see if we have more to merge
    try_merge_buddy_recursive( lower_iterator );
  }
}
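For reference, the `address ^ size` trick above relies on the usual buddy-allocator invariants (power-of-two chunk sizes, each chunk aligned to its own size). A minimal standalone sketch of just that computation, not taken from this allocator:

#include <cassert>
#include <cstdint>
#include <cstddef>

// Flipping the single size bit toggles between a block and its buddy,
// provided the block is size-aligned and size is a power of two.
static inline intptr_t buddy_of(intptr_t address, size_t size) {
  return address ^ static_cast<intptr_t>(size);
}

int main() {
  // two adjacent 64-byte chunks at 0x1000 and 0x1040 are each other's buddies
  assert(buddy_of(0x1000, 64) == 0x1040);
  assert(buddy_of(0x1040, 64) == 0x1000);
  // after merging into a 128-byte chunk at 0x1000, its buddy is at 0x1080
  assert(buddy_of(0x1000, 128) == 0x1080);
  return 0;
}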
void block_until_acquired() {
  if( !acquired_ ) {
    start_acquire();
#ifdef VTRACE_FULL
    VT_TRACER("incoherent block_until_acquired");
#endif
    DVLOG(5) << "Worker " << Grappa::current_worker()
             << " ready to block on " << *request_address_ << " * " << *count_;
    if( !acquired_ ) {
      start_time_ = Grappa::timestamp();
    } else {
      start_time_ = 0;
    }
    while( !acquired_ ) {
      DVLOG(5) << "Worker " << Grappa::current_worker()
               << " blocking on " << *request_address_ << " * " << *count_;
      if( !acquired_ ) {
        thread_ = Grappa::current_worker();
        Grappa::suspend();
        thread_ = NULL;
      }
      DVLOG(5) << "Worker " << Grappa::current_worker()
               << " woke up for " << *request_address_ << " * " << *count_;
    }
    IAMetrics::record_wakeup_latency( start_time_, network_time_ );
  }
}
static Epetra_MultiVector getComponent( spaceT const& Xh, Epetra_MultiVector const& sol )
{
    Epetra_Map componentMap ( epetraMap( Xh->template functionSpace<index>()->map() ) );
    Epetra_Map globalMap ( epetraMap( Xh->map() ) );

    //DVLOG(2) << "Component map: " << componentMap << "\n";

    Epetra_MultiVector component( componentMap, 1 );

    int Length = component.MyLength();
    int shift = Xh->nDofStart( index );

    for ( int i=0; i < Length; i++ )
    {
        int compGlobalID = componentMap.GID( i );

        if ( compGlobalID >= 0 )
        {
            int compLocalID = componentMap.LID( compGlobalID );

            int localID = globalMap.LID( compGlobalID+shift );
            // int globalID = globalMap.GID(localID);

            DVLOG(2) << "[MyBackend] Copy entry sol[" << localID << "]=" << sol[0][localID]
                     << " to component[" << compLocalID << "]\n";

            component[0][compLocalID] = sol[0][localID];

            DVLOG(2) << component[0][compLocalID] << "\n";
        }
    }

    return component;
}
T * block_until_pop() {
  DVLOG(5) << __PRETTY_FUNCTION__ << "/" << this
           << ": blocking until pop with " << s_.get_value() << " now";
  s_.decrement();
  T * result = ptrs_[s_.get_value()];
  DVLOG(5) << __PRETTY_FUNCTION__ << "/" << this
           << ": finished blocking until pop with " << s_.get_value() << "/" << result;
  return result;
}
uint32_t InnerTcpConnection::connect(uint32_t memberId, int retry) {
  InnerTcpConnectionState expectedState = INITIAL;
  if (!state.compare_exchange_strong(expectedState, CONNECTING)) {
    DVLOG(2) << "Already connecting to remote peer: " << memberId;
    return 0;
  }

  setPeerMemberId(memberId);

  /// connect to remote peer
  auto ep = ::idgs::util::singleton<idgs::actor::RpcFramework>::getInstance().getNetwork()->getEndPoint(memberId);
  if (ep == NULL) {
    LOG(ERROR) << "Network endpoint of member " << memberId << " is not available.";
    terminate();
    return RC_CLIENT_SERVER_IS_NOT_AVAILABLE;
  }

  auto& end_point = ep->tcpEndPoint;
  DVLOG(0) << "Connecting to remote peer " << memberId << '(' << end_point << ")";

  auto conn = shared_from_this();
  try {
    socket.async_connect(end_point, [conn, retry](const asio::error_code& error) {
      conn->handleConnect(error, retry);
    });
  } catch (std::exception& e) {
    LOG(ERROR) << "Failed to connect to remote peer " << memberId << ", exception: " << e.what();
    terminate();
  }

  return 0;
}
bool PartionManagableNode::processGetPartitionTableReq(OperationContext& context) {
  if (!checkOperationName(context, ADMIN_GET_REQUEST)) {
    return false;
  }

  idgs::Application& app = ::idgs::util::singleton<idgs::Application>::getInstance();
  PartitionTableMgr* pm = app.getPartitionManager();

  idgs::pb::PartitionTable table;
  pm->genPartitionTable(table);
  DVLOG(4) << "get partition table:\n" << table.DebugString();

  string jsonBody = protobuf::JsonMessage::toJsonString(&table);
  DVLOG(3) << "get json for partition table:\n" << jsonBody;

  std::shared_ptr<idgs::actor::ActorMessage> response =
      idgs::admin::util::createAdminResponse(context, ::idgs::admin::pb::Success, jsonBody);
  idgs::actor::sendMessage(response);

  return true;
}
bool SplitStringIntoKeyValues(
    const std::string& line,
    char key_value_delimiter,
    std::string* key,
    std::vector<std::string>* values) {
  key->clear();
  values->clear();

  // Find the key.
  size_t end_key_pos = line.find_first_of(key_value_delimiter);
  if (end_key_pos == std::string::npos) {
    DVLOG(1) << "cannot parse key from line: " << line;
    return false;  // No key.
  }
  key->assign(line, 0, end_key_pos);

  // Find the values.
  std::string remains(line, end_key_pos, line.size() - end_key_pos);
  size_t begin_values_pos = remains.find_first_not_of(key_value_delimiter);
  if (begin_values_pos == std::string::npos) {
    DVLOG(1) << "cannot parse value from line: " << line;
    return false;  // No value.
  }
  std::string values_string(remains, begin_values_pos, remains.size() - begin_values_pos);

  // Append to the vector.
  values->push_back(values_string);

  return true;
}
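A short usage sketch of the parser above; the caller, input line, and expected outputs are illustrative, and it assumes the declaration above is in scope:

#include <cassert>
#include <string>
#include <vector>

// Hypothetical caller, only to show the contract: everything after the first
// run of delimiter characters becomes a single value string.
void example_usage() {
  std::string key;
  std::vector<std::string> values;

  bool ok = SplitStringIntoKeyValues("timeout: 30s", ':', &key, &values);
  assert(ok);
  assert(key == "timeout");
  assert(values.size() == 1);
  assert(values[0] == " 30s");  // only delimiter chars are skipped, not whitespace
}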
/// Enroll more things that need to be completed before the global completion is, well, complete.
/// This will send a cancel to the master if this core previously entered the cancellable barrier.
///
/// Blocks until cancel completes (if it must cancel) to ensure correct ordering, therefore
/// cannot be called from a message handler.
void enroll(int64_t inc = 1) {
  if (inc == 0) return;

  CHECK_GE(inc, 1);
  count += inc;
  DVLOG(5) << "enroll " << inc << " -> " << count << " gce(" << this << ")";

  // first one to have work here
  if (count == inc) { // count[0 -> inc]
    event_in_progress = true; // optimization to save checking in wait()

    // cancel barrier
    Core co = impl::call(master_core, [this] {
      cores_out++;
      return cores_out;
    });

    // first one to cancel barrier should make sure other cores are ready to wait
    if (co == 1) { // cores_out[0 -> 1]
      event_in_progress = true;
      call_on_all_cores([this] {
        event_in_progress = true;
      });
      CHECK(event_in_progress);
    }

    // block until cancelled
    CHECK_GT(count, 0);
    DVLOG(2) << "gce(" << this << " cores_out: " << co << ", count: " << count << ")";
  }
}
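The enroll/complete pair above implements the usual global-completion-event protocol. A hedged sketch of how a caller typically drives such a GCE (assuming a Grappa-style GlobalCompletionEvent with enroll/complete/wait; `spawn_work` and `do_one_task` are hypothetical helpers, not taken from this file):

// Sketch only: one core enrolls N units of work, workers call complete() as
// they finish (possibly on other cores), and wait() returns once the global
// count across all cores reaches zero.
void run_phase(GlobalCompletionEvent& gce, int64_t n_tasks) {
  gce.enroll(n_tasks);              // must happen before completes can race ahead
  for (int64_t i = 0; i < n_tasks; i++) {
    spawn_work([&gce, i] {          // hypothetical task-spawn helper
      do_one_task(i);               // hypothetical unit of work
      gce.complete();               // may run on a remote core / in a message handler
    });
  }
  gce.wait();                       // blocks until every enrolled unit has completed
}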
bool lookup ( K key ) {
  ++hashset_lookup_ops;
  if (FLAGS_flat_combining) {
    ResultEntry re{false,nullptr};
    DVLOG(3) << "lookup[" << key << "] = " << &re;

    proxy.combine([&re,key,this](Proxy& p){
      // if (p.keys_to_insert.count(key) > 0) {
      //   ++hashset_matched_lookups;
      //   re.result = true;
      //   return FCStatus::SATISFIED;
      // } else {
        if (p.lookups.count(key) == 0) p.lookups[key] = nullptr;
        re.next = p.lookups[key];
        p.lookups[key] = &re;
        DVLOG(3) << "p.lookups[" << key << "] = " << &re;
        return FCStatus::BLOCKED;
      // }
    });

    return re.result;
  } else {
    ++hashset_lookup_msgs;
    return delegate::call(base+computeIndex(key), [key](Cell* c){
      for (auto& e : c->entries) if (e.key == key) return true;
      return false;
    });
  }
}
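The `re.next = p.lookups[key]; p.lookups[key] = &re;` lines above chain every pending lookup for the same key into an intrusive singly linked list, so the combiner can satisfy all of them with one remote probe. A minimal standalone sketch of that chaining idea; the types and names are illustrative, not the actual structures used here:

#include <cassert>
#include <unordered_map>

// Illustrative stand-in for the per-lookup record used above.
struct ResultEntrySketch {
  bool result;
  ResultEntrySketch* next;
};

// The combiner resolves one key, then walks the chain and publishes the
// answer to every waiter that registered for that key.
void satisfy_chain(ResultEntrySketch* head, bool answer) {
  for (ResultEntrySketch* e = head; e != nullptr; e = e->next) {
    e->result = answer;
  }
}

int main() {
  std::unordered_map<int, ResultEntrySketch*> lookups;
  ResultEntrySketch a{false, nullptr}, b{false, nullptr};

  // two callers register for key 42, newest at the head (as in lookup() above)
  a.next = lookups.count(42) ? lookups[42] : nullptr; lookups[42] = &a;
  b.next = lookups.count(42) ? lookups[42] : nullptr; lookups[42] = &b;

  satisfy_chain(lookups[42], true);  // one probe answers both waiters
  assert(a.result && b.result);
  return 0;
}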
T readFF(GlobalAddress<FullEmpty<T>> fe_addr) {
  if (fe_addr.core() == mycore()) {
    DVLOG(2) << "local";
    return fe_addr.pointer()->readFF();
  }

  FullEmpty<T> result;
  auto result_addr = make_global(&result);

  send_message(fe_addr.core(), [fe_addr,result_addr]{
    auto& fe = *fe_addr.pointer();

    if (fe.full()) {
      // DVLOG(2) << "no need to block";
      fill_remote(result_addr, fe.readFF());
      return;
    }

    DVLOG(2) << "setting up to block (" << fe_addr << ")";
    auto* c = SuspendedDelegate::create([&fe,result_addr]{
      VLOG(0) << "suspended_delegate!";
      fill_remote(result_addr, fe.readFF());
    });
    add_waiter(&fe, c);
  });

  return result.readFF();
}
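For readers unfamiliar with full/empty synchronization: readFF waits until the cell is full and returns the value while leaving it full, which is why the local `result` cell above can be read exactly once the remote side fills it. A toy single-threaded sketch of that state machine; it is not the real FullEmpty type, which suspends the calling worker instead of asserting:

#include <cassert>

// Toy full/empty cell: writeXF fills it, readFF requires full and leaves it full.
// The real implementation blocks the reader until the cell becomes full.
template <typename T>
struct FullEmptySketch {
  T value{};
  bool full = false;
  void writeXF(const T& v) { value = v; full = true; }
  T readFF() const { assert(full); return value; }  // "FF": wait-for-full, leave full
};

int main() {
  FullEmptySketch<int> cell;
  cell.writeXF(7);
  assert(cell.readFF() == 7);
  assert(cell.full);  // readFF does not empty the cell
  return 0;
}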
//! \return true if this mesh is the parent mesh of \p m
bool isParentMeshOf( boost::shared_ptr<MeshBase> m ) const
{
    DVLOG(4) << "isParentMeshOf<mesh_ptrtype> called\n";
    bool res = m->isSubMeshFrom( this );

    if ( res == false )
        return res;

    DVLOG(4) << "this isParentMeshOf m: " << res << "\n";
    return res;
}
//! \return true if this mesh is a submesh of the mesh \p m
bool isSubMeshFrom( MeshBase const* m ) const
{
    DVLOG(4) << "isSubMeshFrom<mesh_ptrtype> called\n";

    if ( !M_smd )
        return false;

    bool res = ( M_smd->mesh.get() == m );
    DVLOG(4) << "this isSubMeshFrom m: " << res << "\n";
    return res;
}
//! \return true if this mesh is a sibling of the mesh \p m (both are submeshes of the same parent)
bool isSiblingOf( boost::shared_ptr<MeshBase> m ) const
{
    DVLOG(4) << "isSibling<mesh_ptrtype> called\n";

    if ( !M_smd || !m->hasSubMeshData() )
        return false;

    bool res = ( M_smd->mesh.get() == m->M_smd->mesh.get() );

    if ( res == false )
        return res;

    DVLOG(4) << "this isSibling m: " << res << "\n";
    return res;
}
bool ActorframeworkNode::processActorsReq(OperationContext& context) {
  idgs::admin::AttributePathPtr& attr = context.attr;
  DVLOG(3) << "will process actors admin request: " << attr->getFullPath();

  if (!checkOperationName(context, ADMIN_GET_REQUEST)) {
    return false;
  }

  string memInfoJson;
  idgs::Application& app = ::idgs::util::singleton<idgs::Application>::getInstance();

  std::string member_id;
  attr->getParameterValue(MEMBER_ID_PARAM, member_id);
  DVLOG(3) << "get member id: " << member_id;

  if (app.getMemberManager()->findMember(atoi(member_id.c_str())) == NULL) {
    std::shared_ptr<idgs::actor::ActorMessage> response =
        idgs::admin::util::createAdminResponse(context, ::idgs::admin::pb::Error,
                                               "can not find member " + member_id);
    idgs::actor::sendMessage(response);
    return false;
  }

  int targetMemberId = atoi(member_id.c_str());
  uint32_t localMemberId = app.getMemberManager()->getLocalMemberId();
  if (localMemberId != targetMemberId) {
    // not the target member: route the request to the member that owns the data
    idgs::actor::ActorMessagePtr& actorMsg = context.actorMsg;
    idgs::actor::ActorMessagePtr routMsg = actorMsg->createRouteMessage(targetMemberId, ADMIN_ACTOR_ID);
    DVLOG(3) << "Actor Admin request [" << actorMsg->toString() << "] will be routed to member " << targetMemberId;
    idgs::actor::sendMessage(routMsg);
    return false;
  }

  std::string body = "";
  DVLOG(3) << "process attribute: " << attr->getAttributePath();
  if (attr->getAttributePath() == ALL_STATEFUL_ACTORS) {
    idgs::actor::StatefulActorMap& actorsMap = app.getActorframework()->getStatefulActors();
    body = statefulActos2Json(actorsMap);
  } else if (attr->getAttributePath() == ALL_STATELESS_ACTORS) {
    // stateless actors: no body generated here
  } else if (attr->getAttributePath() == ACTOR) {
    // single actor: no body generated here
  } else {
    // unknown attribute: respond with an empty body
  }

  DVLOG(3) << "The response body is:\n" << body;
  std::shared_ptr<idgs::actor::ActorMessage> response =
      idgs::admin::util::createAdminResponse(context, ::idgs::admin::pb::Success, body);
  idgs::actor::sendMessage(response);

  return true;
}
/// delegate malloc
static GlobalAddress< void > remote_malloc( size_t size_bytes ) {
  // ask node 0 to allocate memory
  auto allocated_address = Grappa::impl::call( 0, [size_bytes] {
    DVLOG(5) << "got malloc request for size " << size_bytes;
    GlobalAddress< void > a = global_allocator->local_malloc( size_bytes );
    DVLOG(5) << "malloc returning pointer " << a.pointer();
    return a;
  });
  return allocated_address;
}
bool try_push( T * buf ) {
  DVLOG(5) << __PRETTY_FUNCTION__ << "/" << this
           << ": trying to push " << buf << " with " << s_.get_value() << " already";
  if( s_.get_value() < max_count ) {
    push(buf);
    DVLOG(5) << __PRETTY_FUNCTION__ << "/" << this
             << ": succeeded; pushed " << buf << " with " << s_.get_value() << " now";
    return true;
  } else {
    return false;
  }
}
T * try_pop() {
  DVLOG(5) << __PRETTY_FUNCTION__ << "/" << this
           << ": trying to pop with " << s_.get_value() << " now";
  if( s_.try_decrement() ) {
    T * t = ptrs_[ s_.get_value() ];
    DVLOG(5) << __PRETTY_FUNCTION__ << "/" << this
             << ": succeeded; popping " << t << " with " << s_.get_value() << " now";
    return t;
  } else {
    return NULL;
  }
}
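try_push, try_pop, and block_until_pop above all use a counting semaphore's value as the stack index into `ptrs_`. A minimal single-threaded sketch of that pattern; `CountingSemaphoreSketch` and `PointerPoolSketch` are made up for illustration and omit the blocking behavior of the real semaphore:

#include <cassert>
#include <cstddef>

// Toy stand-in for the counting semaphore: just a counter with the same
// get_value / try_decrement / increment surface used by the pool above.
struct CountingSemaphoreSketch {
  size_t value = 0;
  size_t get_value() const { return value; }
  bool try_decrement() { if (value == 0) return false; value--; return true; }
  void increment() { value++; }
};

template <typename T, size_t MaxCount>
struct PointerPoolSketch {
  CountingSemaphoreSketch s_;
  T* ptrs_[MaxCount];

  bool try_push(T* buf) {
    if (s_.get_value() >= MaxCount) return false;
    ptrs_[s_.get_value()] = buf;   // the semaphore value doubles as the stack top
    s_.increment();
    return true;
  }

  T* try_pop() {
    if (!s_.try_decrement()) return nullptr;
    return ptrs_[s_.get_value()];  // value after decrement indexes the popped slot
  }
};

int main() {
  int a = 1, b = 2;
  PointerPoolSketch<int, 2> pool;
  assert(pool.try_push(&a) && pool.try_push(&b));
  assert(!pool.try_push(&a));      // full
  assert(pool.try_pop() == &b);    // LIFO order
  assert(pool.try_pop() == &a);
  assert(pool.try_pop() == nullptr);
  return 0;
}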
LocalMapGraph::LocalMapGraph (std::unordered_set<Edge, Edge_hasher>& edges) : adjs() {
  // assume that the vertex ids are not compressed
  DVLOG(5) << "local construction: ";
  for (auto e : edges) {
    DVLOG(5) << " " << e;
    auto& val = adjs[e.src];
    val.insert(e.dst);
  }
}
LocalAdjListGraph::LocalAdjListGraph(std::vector<Edge>& edges) : adjs() {
  // assume that the vertex ids are not compressed
  DVLOG(5) << "local construction: ";
  for (auto e : edges) {
    DVLOG(5) << " " << e;
    auto& val = adjs[e.src];
    val.push_back(e.dst);
  }
}
/// serialize to a string, deserialize from a byte array
void check_serstr_dearray(const Message* src, Message* dest) {
  std::string serde_str;
  ProtoSerdes<PB_BINARY>::serialize(src, &serde_str);
  DVLOG(5) << dumpBinaryBuffer2(serde_str.c_str(), serde_str.length());

  ProtoSerdes<PB_BINARY>::deserializeFromArray(serde_str.c_str(), serde_str.length(), dest);
  if (src->DebugString().compare(dest->DebugString())) {
    LOG(FATAL) << "\nexpect: " << src->DebugString() << "\nactual: " << dest->DebugString();
  }

  DVLOG(5) << dest->DebugString();
}
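ProtoSerdes<PB_BINARY> presumably wraps the stock protobuf calls; a minimal sketch of the same round-trip check written directly against the protobuf Message API (the helper name `check_roundtrip_raw` is made up):

#include <string>
#include <glog/logging.h>
#include <google/protobuf/message.h>

// Same invariant as check_serstr_dearray, expressed with the plain protobuf API:
// binary-serialize src into a string, parse it back into dest, and compare the
// human-readable forms.
void check_roundtrip_raw(const google::protobuf::Message* src,
                         google::protobuf::Message* dest) {
  std::string buf;
  CHECK(src->SerializeToString(&buf)) << "serialization failed";
  CHECK(dest->ParseFromArray(buf.data(), static_cast<int>(buf.size()))) << "parse failed";
  CHECK_EQ(src->DebugString(), dest->DebugString())
      << "\nexpect: " << src->DebugString() << "\nactual: " << dest->DebugString();
}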
void ThreadPimpl::mcs_release_lock(xct::McsLock* mcs_lock, xct::McsBlockIndex block_index) {
  assorted::memory_fence_acq_rel();
  assert_mcs_aligned(mcs_lock);
  ASSERT_ND(mcs_lock->is_locked());
  ASSERT_ND(block_index > 0);
  ASSERT_ND(current_xct_.get_mcs_block_current() >= block_index);
  xct::McsBlock* block = mcs_blocks_ + block_index;
  ASSERT_ND(!block->waiting_);
  ASSERT_ND(block->lock_addr_tag_ == mcs_lock->last_1byte_addr());

  if (block->successor_block_ == 0) {
    // okay, the successor "seems" nullptr (not contended), but we have to make sure with an atomic CAS
    uint32_t expected = xct::McsLock::to_int(id_, block_index);
    uint32_t* address = &(mcs_lock->data_);
    assert_mcs_aligned(address);
#if defined(__GNUC__)
    // GCC's builtin atomic. maybe a bit faster because we don't have to give an address of expected
    bool swapped = __sync_bool_compare_and_swap(address, expected, 0);
#else  // defined(__GNUC__)
    bool swapped = assorted::raw_atomic_compare_exchange_strong<uint32_t>(address, &expected, 0);
#endif  // defined(__GNUC__)
    if (swapped) {
      // we have just unset the locked flag, but someone else might have just acquired it,
      // so we can't put an assertion here.
      ASSERT_ND(id_ == 0 || mcs_lock->get_tail_waiter() != id_);
      DVLOG(2) << "Okay, release a lock uncontended. me=" << id_;
      assorted::memory_fence_acq_rel();
      return;
    }

    DVLOG(0) << "Interesting contention on MCS release. I thought it's null, but someone has just"
      " jumped in. me=" << id_ << ", mcs_lock=" << *mcs_lock;
    // wait for someone else to set the successor
    ASSERT_ND(mcs_lock->is_locked());
    uint64_t spins = 0;
    while (block->successor_block_ == 0) {
      ASSERT_ND(mcs_lock->is_locked());
      if (((++spins) & 0xFFFFFFU) == 0) {
        assorted::spinlock_yield();
      }
      assorted::memory_fence_acquire();
      continue;
    }
  }

  DVLOG(1) << "Okay, I have a successor. me=" << id_ << ", succ=" << block->successor_;
  ASSERT_ND(block->successor_ != id_);
  ThreadRef* successor = engine_->get_thread_pool()->get_thread_ref(block->successor_);
  ASSERT_ND(successor->get_control_block()->mcs_block_current_ >= block->successor_block_);
  xct::McsBlock* succ_block = successor->get_mcs_blocks() + block->successor_block_;
  ASSERT_ND(succ_block->lock_addr_tag_ == mcs_lock->last_1byte_addr());
  ASSERT_ND(succ_block->waiting_);
  ASSERT_ND(mcs_lock->is_locked());
  assorted::memory_fence_acq_rel();
  succ_block->waiting_ = false;  // hand the lock to the successor by clearing its waiting flag
  assorted::memory_fence_acq_rel();
}
LocalAdjListGraph::LocalAdjListGraph(std::unordered_set<Edge, Edge_hasher>& edges) : adjs() {
  // assume that the vertex ids are not compressed
  DVLOG(5) << "local construction: ";
  for (auto e : edges) {
    DVLOG(5) << " " << e;
    VLOG_EVERY_N(4, 100000) << "edges: " << google::COUNTER;
    auto& val = adjs[e.src];
    val.push_back(e.dst);
  }
}