void tick()
{
    if (m_transmit_on_receive && !m_new_packet)
    {
        // In this mode we only transmit if we received a packet
        return;
    }

    // We send a packet when either:
    // 1) We are transmitting on receive and we received a packet, or
    // 2) We transmit on every tick
    if (m_recode_on)
    {
        m_decoder->write_payload(&m_recode_buffer[0]);
        packet p(m_recode_buffer);
        p.set_sender(node_id());
        forward_packet(p);
    }
    else
    {
        if (!m_last_packet.is_valid())
            return;

        m_last_packet.set_sender(node_id());
        forward_packet(m_last_packet);
    }

    m_new_packet = false;
}
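The relay above (and the channel and sink nodes later in this section) pass around a packet type that is not shown. A hypothetical sketch inferred from the calls made on it (buffer constructor, sender tagging, validity, raw data access); this is an assumption, not the simulator's real class:

#include <cstdint>
#include <string>
#include <vector>

class packet
{
public:
    packet() = default;

    // Hypothetical: a packet constructed from a buffer is valid.
    explicit packet(const std::vector<uint8_t>& data)
        : m_data(data), m_valid(true) { }

    void set_sender(const std::string& sender) { m_sender = sender; }
    std::string get_sender() const { return m_sender; }

    bool is_valid() const { return m_valid; }

    const uint8_t* data_begin() const { return m_data.data(); }
    const uint8_t* data_end() const { return m_data.data() + m_data.size(); }

private:
    std::vector<uint8_t> m_data;
    std::string m_sender;
    bool m_valid = false;
};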
void NetDBase::load(const std::string & filename)
{
    Helper::checkFileExists(filename);

    InFile F1(filename);

    // expected format: g1 , g2 , score (tab-delimited)

    drop_index();
    sql.begin();

    int edge_cnt = 0, node_cnt = 0;

    while (!F1.eof())
    {
        std::string n1, n2;
        double sc;
        F1 >> n1 >> n2 >> sc;
        if (n1 == "") break;  // blank read signals end of input

        int nid1 = node_id(n1);
        if (nid1 == 0) { nid1 = add_node(n1); ++node_cnt; }

        int nid2 = node_id(n2);
        if (nid2 == 0) { nid2 = add_node(n2); ++node_cnt; }

        // insert the edge in both directions
        sql.bind_int(stmt_insert_edge, ":n1", nid1);
        sql.bind_int(stmt_insert_edge, ":n2", nid2);
        sql.bind_double(stmt_insert_edge, ":score", sc);
        sql.step(stmt_insert_edge);
        sql.reset(stmt_insert_edge);

        sql.bind_int(stmt_insert_edge, ":n1", nid2);
        sql.bind_int(stmt_insert_edge, ":n2", nid1);
        sql.bind_double(stmt_insert_edge, ":score", sc);
        sql.step(stmt_insert_edge);
        sql.reset(stmt_insert_edge);

        ++edge_cnt;

        if (edge_cnt % 1000 == 0)
            plog << edge_cnt << " edges\t" << node_id_map.size() << " nodes\n";
    }

    plog << "added " << node_cnt << " new nodes, "
         << node_id_map.size() << " unique nodes, "
         << edge_cnt << " edges\n";

    sql.commit();
    index();
}
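For reference, a hypothetical input excerpt in the expected tab-delimited three-column format (the node names and scores below are made up; each input line produces two directed edge rows):

GENE_A	GENE_B	0.87
GENE_A	GENE_C	0.42
GENE_B	GENE_C	0.13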
int DoMergeDevices(boardptr curbrdptr, boardptr mergedbrdptr, char *signalname)
{
    int dx = 0;
    int dy = 0;
    int ox = 0, oy = 0;
    int mx = 0, my = 0;
    komponen komp;
    int nodeid;
    Str63 asal1, asal2;  /* only used by the commented-out origin merge below */

    /* locate the signal and its test point on the current board */
    if (node_cari(&curbrdptr->snode, signalname, 0) < 0)
        return BEEVEE_MERGE_ERROR_CURNOSIGNAL;
    nodeid = node_id(&curbrdptr->snode);
    if (tpt_cari(&curbrdptr->stestpoint, nodeid, 0) < 0)
        return BEEVEE_MERGE_ERROR_CURNOTP;
    tpt_xy(&curbrdptr->stestpoint, &ox, &oy);

    /* locate the same signal and its test point on the board being merged */
    if (node_cari(&mergedbrdptr->snode, signalname, 0) < 0)
        return BEEVEE_MERGE_ERROR_MERNOSIGNAL;
    nodeid = node_id(&mergedbrdptr->snode);
    if (tpt_cari(&mergedbrdptr->stestpoint, nodeid, 0) < 0)
        return BEEVEE_MERGE_ERROR_MERNOTP;
    tpt_xy(&mergedbrdptr->stestpoint, &mx, &my);

    dx = ox - mx;
    dy = oy - my;

    sdata_kekepala(&mergedbrdptr->skomp);
    while (1)
    {
        sdata_ambildataini(&mergedbrdptr->skomp, &komp);

        /* shift the XY coordinates */
        object_geser(&komp.pos_x1, &komp.pos_y1, dx, dy);
        object_geser(&komp.pos_x2, &komp.pos_y2, dx, dy);

        sdata_tambah(&curbrdptr->skomp, &komp, sizeof(komponen));

        if (sdata_diekorkah(&mergedbrdptr->skomp))
            break;
        sdata_kedepan(&mergedbrdptr->skomp);
    }

    return 1;
    /*
    board_asal(curbrdptr, asal1);
    board_asal(mergedbrdptr, asal2);
    if (strstr(asal1, asal2) == NULL) {
        strcat(asal1, asal2);
    }
    */
}
void HexFixture::fill_node_map(const std::map<int, std::vector<EntityId> > &parallel_distribution)
{
  m_nodes_to_procs.clear();

  std::map<int, std::vector<EntityId> >::const_iterator pd_i = parallel_distribution.begin();
  std::map<int, std::vector<EntityId> >::const_iterator pd_e = parallel_distribution.end();
  for (; pd_i != pd_e; ++pd_i) {
    const int proc = pd_i->first;
    const std::vector<EntityId> &elements = pd_i->second;
    for (size_t e_j = 0; e_j < elements.size(); ++e_j) {
      EntityId element_id = elements[e_j];
      size_t ix = 0, iy = 0, iz = 0;
      elem_x_y_z(element_id, ix, iy, iz);

      stk::mesh::EntityId elem_node[8];

      elem_node[0] = node_id( ix   , iy   , iz   );
      elem_node[1] = node_id( ix+1 , iy   , iz   );
      elem_node[2] = node_id( ix+1 , iy+1 , iz   );
      elem_node[3] = node_id( ix   , iy+1 , iz   );
      elem_node[4] = node_id( ix   , iy   , iz+1 );
      elem_node[5] = node_id( ix+1 , iy   , iz+1 );
      elem_node[6] = node_id( ix+1 , iy+1 , iz+1 );
      elem_node[7] = node_id( ix   , iy+1 , iz+1 );

      for (int ien = 0; ien < 8; ++ien) {
        AddToNodeProcsMMap(m_nodes_to_procs, elem_node[ien], proc);
      }
    }
  }
}
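The HexFixture snippets in this section all lean on a node_id(ix, iy, iz) helper that is not shown. A plausible sketch of the 1-based, x-fastest numbering it implies (an assumption, not the fixture's actual definition):

EntityId node_id(size_t ix, size_t iy, size_t iz) const
{
  // Hypothetical: 1-based ids on an (nx+1) x (ny+1) x (nz+1) node grid,
  // with x varying fastest, then y, then z.
  return 1 + ix + (m_nx + 1) * (iy + (m_ny + 1) * iz);
}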
void HexFixture::generate_mesh(std::vector<EntityId> & element_ids_on_this_processor,
                               const CoordinateMapping & coordMap)
{
  {
    // sort and unique the input elements
    std::vector<EntityId>::iterator ib = element_ids_on_this_processor.begin();
    std::vector<EntityId>::iterator ie = element_ids_on_this_processor.end();
    std::sort(ib, ie);
    ib = std::unique(ib, ie);
    element_ids_on_this_processor.erase(ib, ie);
  }

  m_bulk_data.modification_begin();

  {
    // Declare the elements that belong on this process
    std::vector<EntityId>::iterator ib = element_ids_on_this_processor.begin();
    const std::vector<EntityId>::iterator ie = element_ids_on_this_processor.end();
    stk::mesh::EntityIdVector elem_nodes(8);
    for (; ib != ie; ++ib) {
      EntityId entity_id = *ib;
      size_t ix = 0, iy = 0, iz = 0;
      elem_x_y_z(entity_id, ix, iy, iz);

      elem_nodes[0] = node_id( ix   , iy   , iz   );
      elem_nodes[1] = node_id( ix+1 , iy   , iz   );
      elem_nodes[2] = node_id( ix+1 , iy+1 , iz   );
      elem_nodes[3] = node_id( ix   , iy+1 , iz   );
      elem_nodes[4] = node_id( ix   , iy   , iz+1 );
      elem_nodes[5] = node_id( ix+1 , iy   , iz+1 );
      elem_nodes[6] = node_id( ix+1 , iy+1 , iz+1 );
      elem_nodes[7] = node_id( ix   , iy+1 , iz+1 );

      stk::mesh::declare_element(m_bulk_data, m_elem_parts, elem_id(ix, iy, iz), elem_nodes);

      for (size_t i = 0; i < 8; ++i) {
        EntityId node_id = elem_nodes[i];
        stk::mesh::Entity const node = m_bulk_data.get_entity(stk::topology::NODE_RANK, node_id);

        m_bulk_data.change_entity_parts(node, m_node_parts);

        DoAddNodeSharings(m_bulk_data, m_nodes_to_procs, node_id, node);

        ThrowRequireMsg(m_bulk_data.is_valid(node),
                        "This process should know about the nodes that make up its element");

        // Compute and assign coordinates to the node
        size_t nx = 0, ny = 0, nz = 0;
        node_x_y_z(elem_nodes[i], nx, ny, nz);

        Scalar * data = stk::mesh::field_data(m_coord_field, node);

        coordMap.getNodeCoordinates(data, nx, ny, nz);
      }
    }
  }

  m_bulk_data.modification_end();
}
void find_data_observer::reply(msg const& m)
{
    bdecode_node r = m.message.dict_find_dict("r");
    if (!r)
    {
#ifndef TORRENT_DISABLE_LOGGING
        get_observer()->log(dht_logger::traversal, "[%u] missing response dict"
            , algorithm()->id());
#endif
        timeout();
        return;
    }

    bdecode_node id = r.dict_find_string("id");
    if (!id || id.string_length() != 20)
    {
#ifndef TORRENT_DISABLE_LOGGING
        get_observer()->log(dht_logger::traversal, "[%u] invalid id in response"
            , algorithm()->id());
#endif
        timeout();
        return;
    }

    bdecode_node token = r.dict_find_string("token");
    if (token)
    {
        static_cast<find_data*>(algorithm())->got_write_token(
            node_id(id.string_ptr()), token.string_value().to_string());
    }

    traversal_observer::reply(m);
    done();
}
void default_actor_addressing::write(serializer* sink, const actor_ptr& ptr)
{
    CPPA_REQUIRE(sink != nullptr);
    if (ptr == nullptr) {
        CPPA_LOGMF(CPPA_DEBUG, self, "serialized nullptr");
        sink->begin_object("@0");
        sink->end_object();
    }
    else {
        // local actor?
        if (!ptr->is_proxy()) {
            get_actor_registry()->put(ptr->id(), ptr);
        }
        auto pinf = m_pinf;
        if (ptr->is_proxy()) {
            auto dptr = ptr.downcast<default_actor_proxy>();
            if (dptr) pinf = dptr->process_info();
            else CPPA_LOGMF(CPPA_ERROR, self, "downcast failed");
        }
        sink->begin_object("@actor");
        sink->write_value(ptr->id());
        sink->write_value(pinf->process_id());
        sink->write_raw(process_information::node_id_size,
                        pinf->node_id().data());
        sink->end_object();
    }
}
actor_ptr default_actor_addressing::read(deserializer* source)
{
    CPPA_REQUIRE(source != nullptr);
    auto cname = source->seek_object();
    if (cname == "@0") {
        CPPA_LOGMF(CPPA_DEBUG, self, "deserialized nullptr");
        source->begin_object("@0");
        source->end_object();
        return nullptr;
    }
    else if (cname == "@actor") {
        process_information::node_id_type nid;
        source->begin_object(cname);
        auto aid = source->read<uint32_t>();
        auto pid = source->read<uint32_t>();
        source->read_raw(process_information::node_id_size, nid.data());
        source->end_object();
        // local actor?
        auto pinf = process_information::get();
        if (pid == pinf->process_id() && nid == pinf->node_id()) {
            return get_actor_registry()->get(aid);
        }
        else {
            process_information tmp(pid, nid);
            return get_or_put(tmp, aid);
        }
    }
    else throw runtime_error("expected type name \"@0\" or \"@actor\"; "
                             "found: " + cname);
}
void QcOsmPbfReader::read_dense_nodes(OSMPBF::PrimitiveGroup primitive_group)
{
    enter_node_transactions();

    OSMPBF::DenseNodes dense_node = primitive_group.dense();

    DeltaCodedInt64 node_id;
    DeltaCodedInt64 longitude;
    DeltaCodedInt64 latitude;

    int number_of_nodes = dense_node.id_size();
    for (int i = 0; i < number_of_nodes; i++) {
        node_id.update(dense_node.id(i));
        longitude.update(dense_node.lon(i));
        latitude.update(dense_node.lat(i));
        // qDebug() << "dense node" << node_id() << to_wgs(longitude(), latitude());
        yield_node(node_id(), longitude(), latitude());
    }

    // The storage pattern is: ((<keyid> <valid>)* '0')*
    bool is_key = true;
    int key_id = 0;
    for (int i = 0, l = dense_node.keys_vals_size(), node_index = 0; i < l; i++) {
        int32_t key_val_id = dense_node.keys_vals(i);
        if (key_val_id) {
            if (is_key)
                key_id = key_val_id;
            else {
                // qDebug() << "attr" << node_index << m_string_table[key_id] << "=" << m_string_table[key_val_id];
                yield_node_attribute(node_index, key_id, key_val_id);
            }
            is_key = not(is_key);
        }
        else
            node_index++;
    }

    if (m_read_metadatas and dense_node.has_denseinfo()) {
        // qDebug().nospace() << "with meta-info";
        OSMPBF::DenseInfo dense_info = dense_node.denseinfo();
        DeltaCodedInt64 timestamp;
        DeltaCodedInt64 changeset;
        DeltaCodedInt64 uid;
        DeltaCodedInt64 user_sid;
        for (int i = 0; i < number_of_nodes; i++) {
            int32_t version = dense_info.version(i);
            timestamp.update(to_timestamp(dense_info.timestamp(i)));
            changeset.update(dense_info.changeset(i));
            uid.update(dense_info.uid(i));
            user_sid.update(dense_info.user_sid(i));
            // bool visible = dense_info.visible(i);
            // qDebug() << "Meta information:" << version << timestamp() << changeset() << uid() << user_sid();
            // yield_node_metadata(i, version, timestamp(), changeset(), uid(), user_sid());
        }
    }

    leave_node_transactions();
}
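DeltaCodedInt64 is not shown in the snippet above. Dense OSM PBF arrays store each value as a delta against the previous entry, so a minimal sketch of the accumulator it implies (an assumption about the real class):

#include <cstdint>

class DeltaCodedInt64
{
public:
    // Add the next delta from the PBF stream to the running value.
    void update(int64_t delta) { m_value += delta; }

    // Return the decoded (accumulated) value.
    int64_t operator()() const { return m_value; }

private:
    int64_t m_value = 0;
};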
inline Node *node()
{
    Node *n = new Node();
    n->node_id = node_id();
    n->hw_id = randr_string(5, "node-");
    return n;
}
/**
 * @param n an integer
 * @param m an integer
 * @param operators an array of point
 * @return an integer array
 */
vector<int> numIslands2(int n, int m, vector<Point>& operators)
{
    vector<int> numbers;
    int number = 0;
    const vector<pair<int, int>> directions{{0, -1}, {0, 1}, {-1, 0}, {1, 0}};
    unordered_map<int, int> set;
    for (const auto& oper : operators) {
        const auto& node = make_pair(oper.x, oper.y);
        set[node_id(node, m)] = node_id(node, m);
        ++number;
        for (const auto& d : directions) {
            const auto& neighbor = make_pair(oper.x + d.first,
                                             oper.y + d.second);
            if (neighbor.first >= 0 && neighbor.first < n &&
                neighbor.second >= 0 && neighbor.second < m &&
                set.find(node_id(neighbor, m)) != set.end()) {
                if (find_set(node_id(node, m), &set) !=
                    find_set(node_id(neighbor, m), &set)) {
                    // Merge different islands.
                    union_set(&set, node_id(node, m), node_id(neighbor, m));
                    --number;
                }
            }
        }
        numbers.emplace_back(number);
    }
    return numbers;
}
/**
 * @param m an integer
 * @param n an integer
 * @param positions an array of (row, column) pairs
 * @return an integer array
 */
vector<int> numIslands2(int m, int n, vector<pair<int, int>>& positions)
{
    vector<int> numbers;
    int number = 0;
    const vector<pair<int, int>> directions{{0, -1}, {0, 1}, {-1, 0}, {1, 0}};
    vector<int> set(m * n, -1);
    for (const auto& position : positions) {
        const auto& node = make_pair(position.first, position.second);
        set[node_id(node, n)] = node_id(node, n);
        ++number;
        for (const auto& d : directions) {
            const auto& neighbor = make_pair(position.first + d.first,
                                             position.second + d.second);
            if (neighbor.first >= 0 && neighbor.first < m &&
                neighbor.second >= 0 && neighbor.second < n &&
                set[node_id(neighbor, n)] != -1) {
                if (find_set(node_id(node, n), &set) !=
                    find_set(node_id(neighbor, n), &set)) {
                    // Merge different islands, amortised time: O(log*k) ~= O(1)
                    union_set(&set, node_id(node, n), node_id(neighbor, n));
                    --number;
                }
            }
        }
        numbers.emplace_back(number);
    }
    return numbers;
}
vector<int> numIslands2(int m, int n, vector<pair<int, int>>& positions)
{
    vector<int> numbers;
    int number = 0;
    vector<pair<int, int>> directions{{0, -1}, {0, 1}, {-1, 0}, {1, 0}};
    unordered_map<int, int> set;
    for (const auto& position : positions) {
        // make_pair returns a temporary, so bind it by const reference
        // (the original non-const auto& would not compile)
        const auto& node = make_pair(position.first, position.second);
        set[node_id(node, n)] = node_id(node, n);
        ++number;
        for (const auto& d : directions) {
            const auto& neighbor = make_pair(position.first + d.first,
                                             position.second + d.second);
            if (neighbor.first >= 0 && neighbor.first < m &&
                neighbor.second >= 0 && neighbor.second < n &&
                set.find(node_id(neighbor, n)) != set.end()) {
                if (find_set(node_id(node, n), &set) !=
                    find_set(node_id(neighbor, n), &set)) {
                    // Merge different islands.
                    union_set(&set, node_id(node, n), node_id(neighbor, n));
                    --number;
                }
            }
        }
        numbers.emplace_back(number);
    }
    return numbers;
}
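All three numIslands2 variants above assume node_id, find_set, and union_set helpers that are not shown. A hypothetical sketch matching the unordered_map variants (the vector-based variant would index the vector directly instead of using a map):

// Hypothetical helpers; the real definitions are not part of this section.
int node_id(const pair<int, int>& node, int n)
{
    // Flatten a (row, column) coordinate on an n-column grid.
    return node.first * n + node.second;
}

int find_set(int x, unordered_map<int, int>* set)
{
    // Follow parent links to the root, compressing the path on the way.
    if ((*set)[x] != x) {
        (*set)[x] = find_set((*set)[x], set);
    }
    return (*set)[x];
}

void union_set(unordered_map<int, int>* set, int x, int y)
{
    int x_root = find_set(x, set);
    int y_root = find_set(y, set);
    if (x_root != y_root) {
        (*set)[x_root] = y_root;  // link one root under the other
    }
}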
inline ProcessID *process_id()
{
    ProcessID *pid = new ProcessID;
    pid->nid = node_id();
    pid->pid = randr();
    pid->time = randr();
    return pid;
}
node* node_op_new(const char* op, node* lhs, node* rhs)
{
    node_op* nop = malloc(sizeof(node_op));
    nop->type = NODE_OP;
    nop->lhs = lhs;
    nop->op = node_id(op, strlen(op));
    nop->rhs = rhs;
    return (node*)nop;
}
inline StorageDevice *storage_device()
{
    StorageDevice *sd = new StorageDevice();
    sd->device_id = device_id();
    sd->node_id = node_id();
    sd->model_name = randr_string(5, "sd-model-");
    sd->local_address = randr_string(5, "sd-address-");
    return sd;
}
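The fixture helpers above rely on randr and randr_string, which are not shown. A minimal sketch of what they presumably do (hypothetical, not the project's definitions):

#include <cstdint>
#include <cstdlib>
#include <string>

// Hypothetical: a random 64-bit value.
inline uint64_t randr()
{
    return (static_cast<uint64_t>(std::rand()) << 32) ^ std::rand();
}

// Hypothetical: `len` random alphanumeric characters appended to `prefix`.
inline std::string randr_string(size_t len, const std::string& prefix)
{
    static const char alphabet[] = "abcdefghijklmnopqrstuvwxyz0123456789";
    std::string s = prefix;
    for (size_t i = 0; i < len; ++i)
        s += alphabet[std::rand() % (sizeof(alphabet) - 1)];
    return s;
}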
int process_information::compare(const process_information& other) const
{
    int tmp = strncmp(reinterpret_cast<const char*>(node_id().data()),
                      reinterpret_cast<const char*>(other.node_id().data()),
                      node_id_size);
    if (tmp == 0) {
        if (m_process_id < other.process_id()) return -1;
        else if (m_process_id == other.process_id()) return 0;
        return 1;
    }
    return tmp;
}
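compare() follows strcmp-style semantics (negative, zero, positive), ordering first by node id and then by process id, so relational operators can be layered on top. A hypothetical sketch, not part of the original source:

inline bool operator<(const process_information& lhs,
                      const process_information& rhs)
{
    return lhs.compare(rhs) < 0;
}

inline bool operator==(const process_information& lhs,
                       const process_information& rhs)
{
    return lhs.compare(rhs) == 0;
}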
void RemoteNode::init_zock_()
{
    VERIFY(ztx_ != nullptr);
    zock_.reset(new zmq::socket_t(*ztx_, ZMQ_DEALER));
    ZUtils::socket_no_linger(*zock_);
    LOG_INFO(node_id() << ": connecting to " << uri());
    zock_->connect(boost::lexical_cast<std::string>(uri()).c_str());
}
void traversal_algorithm::add_router_entries()
{
#ifdef TORRENT_DHT_VERBOSE_LOGGING
    TORRENT_LOG(traversal) << "[" << this
        << "] using router nodes to initiate traversal algorithm. "
        << std::distance(m_node.m_table.router_begin(), m_node.m_table.router_end())
        << " routers";
#endif
    for (routing_table::router_iterator i = m_node.m_table.router_begin()
        , end(m_node.m_table.router_end()); i != end; ++i)
    {
        add_entry(node_id(0), *i, observer::flag_initial);
    }
}
void RemoteNode::handle_(const Request& req,
                         const bc::milliseconds& timeout_ms,
                         ExtraSendFun* send_extra,
                         ExtraRecvFun* recv_extra)
{
    auto work = boost::make_shared<WorkItem>(req, send_extra, recv_extra);

    {
        LOCK();
        queued_work_.push_back(work);
        notify_();
    }

    switch (work->future.wait_for(timeout_ms))
    {
    case boost::future_status::deferred:
        {
            LOG_ERROR(node_id() << ": " << work->request_desc << ", tag "
                      << work->request_tag
                      << ": future unexpectedly returned 'deferred' status");
            drop_request_(*work);
            VERIFY(0 == "unexpected future_status 'deferred'");
            // not reached - VERIFY aborts
        }
    case boost::future_status::timeout:
        {
            LOG_INFO(node_id() << ": remote did not respond within "
                     << timeout_ms << " milliseconds - giving up");
            drop_request_(*work);
            throw RequestTimeoutException("request to remote node timed out");
        }
    case boost::future_status::ready:
        {
            handle_response_(*work);
            break;
        }
    }
}
/// Receives a payload
virtual void receive(packet payload)
{
    for (uint32_t j = 0; j < receiver_count(); ++j)
    {
        std::string recv_id = get_receiver(j)->node_id();
        std::string src_id = payload.get_sender();

        // If true we drop
        if (m_channel_condition->generate())
        {
            ++m_counter[node_id() + "_" + src_id + "_to_" + recv_id + "_dropped"];
        }
        else
        {
            ++m_counter[node_id() + "_" + src_id + "_to_" + recv_id + "_ok"];

            // Deliver packet to receiver j
            forward(j, payload);
        }
    }
}
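The drop decision above delegates to m_channel_condition->generate(). A minimal sketch of such a condition as an i.i.d. Bernoulli trial (the class name and constructor here are assumptions, not the simulator's actual API):

#include <cstdint>
#include <random>

class random_bool
{
public:
    // drop_probability is the chance that generate() returns true.
    random_bool(double drop_probability, uint32_t seed = 0)
        : m_distribution(drop_probability),
          m_engine(seed)
    { }

    bool generate()
    {
        return m_distribution(m_engine);
    }

private:
    std::bernoulli_distribution m_distribution;
    std::mt19937 m_engine;
};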
double VoxelGrid<PointSourceType>::nearestNeighborDistance(PointSourceType query_point, float max_range)
{
    Eigen::Vector3d node_id;
    node_id.setZero();

    // Go from the top of the octree down to the bottom
    for (int i = octree_centroids_.size() - 1; i >= 0; i--) {
        nearestOctreeNodeSearch(query_point, node_id, i);
    }

    int voxel_id = node_id(0) + node_id(1) * vgrid_x_ + node_id(2) * vgrid_x_ * vgrid_y_;

    std::vector<int> &pid_list = points_id_[voxel_id];

    // Use double so the DBL_MAX sentinel below cannot overflow
    // (the original float min_dist could not hold DBL_MAX).
    double min_dist = DBL_MAX;
    float qx = query_point.x;
    float qy = query_point.y;
    float qz = query_point.z;

    //std::cout << "Voxel id = " << voxel_id << std::endl;
    //std::cout << "PID LIST SIZE = " << pid_list.size() << std::endl;
    for (size_t i = 0; i < pid_list.size(); i++) {
        float tx = source_cloud_->points[pid_list[i]].x - qx;
        float ty = source_cloud_->points[pid_list[i]].y - qy;
        float tz = source_cloud_->points[pid_list[i]].z - qz;

        double cur_dist = sqrt(tx * tx + ty * ty + tz * tz);

        if (cur_dist < min_dist) {
            min_dist = cur_dist;
        }
    }

    if (min_dist > max_range)
        min_dist = DBL_MAX;

    return min_dist;
}
void doCheckAccept(Node& remote_node, IP::Endpoint const& remote_endpoint)
{
    // Find our link to the remote node
    Links::iterator iter(std::find_if(m_links.begin(), m_links.end(),
        is_remote_endpoint(remote_endpoint)));

    // See if the logic closed the connection
    if (iter == m_links.end())
        return;

    // Post notifications
    m_network.post(std::bind(&Logic::on_handshake, &remote_node.logic(),
        iter->local_endpoint(), node_id(), false));
    m_network.post(std::bind(&Logic::on_handshake, &logic(),
        remote_endpoint, remote_node.node_id(), false));
}
void traversal_algorithm::add_router_entries()
{
#ifndef TORRENT_DISABLE_LOGGING
    dht_observer* logger = get_node().observer();
    if (logger != nullptr && logger->should_log(dht_logger::traversal))
    {
        logger->log(dht_logger::traversal
            , "[%p] using router nodes to initiate traversal algorithm %d routers"
            , static_cast<void*>(this)
            , int(std::distance(m_node.m_table.begin(), m_node.m_table.end())));
    }
#endif
    for (auto const& n : m_node.m_table)
        add_entry(node_id(), n, observer::flag_initial);
}
std::set<Region> NetDBase::connections_regions(const std::string & seed,
                                               int depth,
                                               double threshold,
                                               bool add_self)
{
    std::set<Region> r;

    int nid = node_id(seed);
    if (nid == 0 || !locdb) return r;

    // note: depth and threshold are accepted but not used by this lookup
    sql.bind_int(stmt_fetch_connections, ":n", nid);
    while (sql.step(stmt_fetch_connections))
    {
        std::string t = sql.get_text(stmt_fetch_connections, 0);
        if (add_self || seed != t)
            r.insert(locdb->get_region(grp, t));
    }
    sql.reset(stmt_fetch_connections);

    return r;
}
std::set<std::string> NetDBase::connections(const std::string & seed,
                                            int depth,
                                            double threshold,
                                            bool add_self)
{
    std::set<std::string> r;

    int nid = node_id(seed);
    if (nid == 0) return r;

    sql.bind_int(stmt_fetch_connections, ":n", nid);
    while (sql.step(stmt_fetch_connections))
    {
        std::string t = sql.get_text(stmt_fetch_connections, 0);
        if (add_self || t != seed)
            r.insert(t);
    }
    sql.reset(stmt_fetch_connections);

    return r;
}
void HexFixture::fill_node_map(int p_rank)
{
  std::vector<EntityId> element_ids_on_this_processor;

  const size_t p_size = m_bulk_data.parallel_size();
  const size_t num_elems = m_nx * m_ny * m_nz;

  const EntityId beg_elem = 1 + (num_elems * p_rank) / p_size;
  const EntityId end_elem = 1 + (num_elems * (p_rank + 1)) / p_size;

  for (EntityId i = beg_elem; i != end_elem; ++i) {
    element_ids_on_this_processor.push_back(i);
  }

  // sort and unique the input elements
  std::vector<EntityId>::iterator ib = element_ids_on_this_processor.begin();
  std::vector<EntityId>::iterator ie = element_ids_on_this_processor.end();
  std::sort(ib, ie);
  ib = std::unique(ib, ie);
  element_ids_on_this_processor.erase(ib, ie);

  ib = element_ids_on_this_processor.begin();
  ie = element_ids_on_this_processor.end();
  for (; ib != ie; ++ib) {
    EntityId entity_id = *ib;
    size_t ix = 0, iy = 0, iz = 0;
    elem_x_y_z(entity_id, ix, iy, iz);

    stk::mesh::EntityId elem_node[8];

    elem_node[0] = node_id( ix   , iy   , iz   );
    elem_node[1] = node_id( ix+1 , iy   , iz   );
    elem_node[2] = node_id( ix+1 , iy+1 , iz   );
    elem_node[3] = node_id( ix   , iy+1 , iz   );
    elem_node[4] = node_id( ix   , iy   , iz+1 );
    elem_node[5] = node_id( ix+1 , iy   , iz+1 );
    elem_node[6] = node_id( ix+1 , iy+1 , iz+1 );
    elem_node[7] = node_id( ix   , iy+1 , iz+1 );

    for (int ien = 0; ien < 8; ++ien) {
      AddToNodeProcsMMap(m_nodes_to_procs, elem_node[ien], p_rank);
    }
  }
}
virtual void receive(packet payload)
{
    m_last_packet = payload;
    m_new_packet = true;

    if (m_decoder->is_complete())
    {
        // Decoding already finished - the payload is wasted
        std::string counter_id = node_id() + "_waste_from_" + payload.get_sender();
        ++m_counter[counter_id];
    }
    else
    {
        std::copy(payload.data_begin(), payload.data_end(), &m_decode_buffer[0]);

        // A packet is innovative iff reading it increases the decoder rank
        uint32_t rank = m_decoder->rank();
        m_decoder->read_payload(&m_decode_buffer[0]);

        if (rank < m_decoder->rank())
        {
            std::string counter_id = node_id() + "_innovative_from_" + payload.get_sender();
            ++m_counter[counter_id];
        }
        else
        {
            // Linearly dependent on previously received packets
            std::string counter_id = node_id() + "_linear_dept_from_" + payload.get_sender();
            ++m_counter[counter_id];
        }
    }
}
void RemoteNode::handle_response_(WorkItem& work)
{
    const vfsprotocol::ResponseType rsp_type = work.future.get();

    switch (rsp_type)
    {
    case vfsprotocol::ResponseType::Ok:
        // all is well
        break;
    case vfsprotocol::ResponseType::ObjectNotRunningHere:
        LOG_INFO(node_id() << ": volume not present on that node");
        throw vd::VolManager::VolumeDoesNotExistException("volume not present on node",
                                                          work.request_desc);
        break;
    case vfsprotocol::ResponseType::UnknownRequest:
        LOG_WARN(node_id() << ": got an UnknownRequest response status in response to "
                 << work.request_desc);
        // handle differently once we need to take care of backward compatibility.
        throw ProtocolError("Remote sent UnknownRequest response status",
                            work.request_desc);
        break;
    case vfsprotocol::ResponseType::Timeout:
        LOG_ERROR(node_id() << ": got a Timeout response status in response to "
                  << work.request_desc);
        throw RemoteTimeoutException("Remote sent timeout status",
                                     work.request_desc);
        break;
    default:
        LOG_ERROR(node_id() << ": " << work.request_desc
                  << " failed, remote returned status "
                  << vfsprotocol::response_type_to_string(rsp_type)
                  << " (" << static_cast<uint32_t>(rsp_type) << ")");
        throw fungi::IOException("Remote operation failed", work.request_desc);
        break;
    }
}
void CpuAgent::InitCacheList()
{
  // Get CPU cache information.
  cache_props_.resize(properties_.NumCaches);
  if (HSAKMT_STATUS_SUCCESS !=
      hsaKmtGetNodeCacheProperties(node_id(), properties_.CComputeIdLo,
                                   properties_.NumCaches, &cache_props_[0])) {
    cache_props_.clear();
  } else {
    // Only store CPU D-cache.
    for (size_t cache_id = 0; cache_id < cache_props_.size(); ++cache_id) {
      const HsaCacheType type = cache_props_[cache_id].CacheType;
      if (type.ui32.CPU != 1 || type.ui32.Instruction == 1) {
        cache_props_.erase(cache_props_.begin() + cache_id);
        --cache_id;
      }
    }
  }
}
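The filter loop above erases while indexing and compensates with --cache_id. The same effect can be had with the erase-remove idiom; a sketch assuming cache_props_ is a std::vector of the hsaKmt cache-properties struct:

#include <algorithm>

// Sketch: keep only CPU data caches, same predicate as the loop above.
cache_props_.erase(
    std::remove_if(cache_props_.begin(), cache_props_.end(),
                   [](const HsaCacheProperties& c) {
                     return c.CacheType.ui32.CPU != 1 ||
                            c.CacheType.ui32.Instruction == 1;
                   }),
    cache_props_.end());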