void MultiStatus::AddAll(IStatus::Pointer status)
{
  poco_assert(status);
  std::vector<IStatus::Pointer> statuses(status->GetChildren());
  for (unsigned int i = 0; i < statuses.size(); i++)
  {
    this->Add(statuses[i]);
  }
}
void AstSipPeer::sDndStatusEvent(const QVariantMap &event)
{
    if (event.contains("Status"))
    {
        uint statusNum = event.value("Status").toUInt();
        AsteriskManager::ExtStatuses statuses(statusNum);

        _isDndOn = !statuses.testFlag(AsteriskManager::NotInUse);

        emit sUpdated(this);
        emit sigDndStatusEvent(this, event, _isDndOn);
    }
}
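// The DND decision above hinges on QFlags::testFlag(): the peer is treated as
// "do not disturb" exactly when the reported status set does not contain
// AsteriskManager::NotInUse. A minimal, self-contained sketch of that pattern
// follows; the ExtStatus enum and the sample bitmask are hypothetical
// stand-ins, not the AsteriskManager definitions.
#include <QFlags>
#include <iostream>

enum ExtStatus {            // hypothetical status bits
    NotInUse = 0x01,
    InUse    = 0x02,
    Ringing  = 0x08
};
Q_DECLARE_FLAGS(ExtStatuses, ExtStatus)
Q_DECLARE_OPERATORS_FOR_FLAGS(ExtStatuses)

int main()
{
    uint statusNum = uint(InUse) | uint(Ringing);   // as it might arrive in an AMI event
    ExtStatuses statuses(QFlag(int(statusNum)));

    // DND is inferred from the absence of NotInUse in the flag set.
    bool isDndOn = !statuses.testFlag(NotInUse);
    std::cout << "DND on: " << std::boolalpha << isDndOn << '\n';   // prints "DND on: true"
    return 0;
}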
void mpi_process_group::poll_requests(int block) const
{
  int size = impl_->requests.size();
  if (size==0) return;
  std::vector<MPI_Status> statuses(size);
  std::vector<int> indices(size);

  while (true) {
    MPI_Testsome(impl_->requests.size(), &impl_->requests[0],
                 &size, &indices[0], &statuses[0]);
    if (size==0) return; // no message waiting

    // remove handled requests before we get the chance to be recursively called
    if (size) {
      std::vector<MPI_Request> active_requests;
      std::size_t i=0;
      int j=0;
      for (; i < impl_->requests.size() && j < size; ++i) {
        if (int(i)==indices[j])
          // release the dealt-with request
          ++j;
        else
          // copy and keep the request
          active_requests.push_back(impl_->requests[i]);
      }
      while (i < impl_->requests.size())
        active_requests.push_back(impl_->requests[i++]);
      impl_->requests.swap(active_requests);
    }

    optional<std::pair<int, int> > result;
    for (int i=0; i < size; ++i) {
      std::pair<int, int> decoded = decode_tag(statuses[i].MPI_TAG);
      block_type* block = impl_->blocks[decoded.first];

      BOOST_ASSERT(decoded.second < static_cast<int>(block->triggers.size())
                   && block->triggers[decoded.second]);

      // We have a trigger for this message; use it
      trigger_receive_context old_context = impl_->trigger_context;
      impl_->trigger_context = trc_irecv_out_of_band;
      block->triggers[decoded.second]->receive(*this, statuses[i].MPI_SOURCE,
                                               decoded.second, impl_->trigger_context,
                                               decoded.first);
      impl_->trigger_context = old_context;
    }
  }
}
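// The heart of poll_requests() is the MPI_Testsome() draining loop: test every
// outstanding request, compact the completed ones out of the active list, and
// dispatch on each returned MPI_Status. A minimal sketch of that pattern,
// without Boost's trigger machinery, is shown below; it assumes the requests
// were posted earlier with MPI_Irecv into the matching int buffers (both the
// buffers and the message layout are hypothetical).
#include <mpi.h>
#include <cstdio>
#include <vector>

void drain_completed(std::vector<MPI_Request>& requests,
                     const std::vector<int>& buffers)
{
    int outcount = 0;
    std::vector<int> indices(requests.size());
    std::vector<MPI_Status> statuses(requests.size());

    // Completed requests are reported in indices[0..outcount) and are set to
    // MPI_REQUEST_NULL inside the requests array.
    MPI_Testsome(static_cast<int>(requests.size()), requests.data(),
                 &outcount, indices.data(), statuses.data());

    if (outcount == MPI_UNDEFINED || outcount == 0)
        return;  // nothing finished yet (or no active requests at all)

    for (int k = 0; k < outcount; ++k)
    {
        int i = indices[k];
        std::printf("buffer %d filled: value %d from rank %d, tag %d\n",
                    i, buffers[i], statuses[k].MPI_SOURCE, statuses[k].MPI_TAG);
        // poll_requests() additionally rebuilds its request vector here so that
        // completed slots are removed before any recursive polling can happen.
    }
}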
void contractor_parallel_all::prune(box & b, SMTConfig & config) {
    DREAL_LOG_DEBUG << "contractor_parallel_all::prune";
    DREAL_LOG_FATAL << "-------------------------------------------------------------";
    // TODO(soonhok): implement this
    if (m_vec.size() == 0) {
        // Do nothing for empty vec
        return;
    }

    // 1. Make n copies of box b
    vector<box> boxes(m_vec.size(), b);
    vector<pruning_thread_status> statuses(m_vec.size(), pruning_thread_status::READY);
    m_index = -1;
    // DREAL_LOG_FATAL << "parallel: Boxes are copied";

    // 2. Trigger execution with each contractor and a copied box
    vector<interruptible_thread> threads;
    atomic_int tasks_to_run(m_vec.size());
    // DREAL_LOG_FATAL << "parallel: tasks to run = " << tasks_to_run.load();
    for (unsigned i = 0; i < m_vec.size(); ++i) {
        DREAL_LOG_FATAL << "parallel : thread " << i << " / " << (tasks_to_run.load() - 1) << " spawning...";
        threads.emplace_back(parallel_helper_fn,
                             i, m_vec[i], boxes[i], config, statuses[i],
                             m_mutex, m_cv, m_index, tasks_to_run);
        DREAL_LOG_FATAL << "parallel : thread " << i << " / " << (tasks_to_run.load() - 1) << " spawned...";
    }
    DREAL_LOG_FATAL << "parallel : " << m_vec.size() << " thread(s) got created";

    while (true) {
        DREAL_LOG_FATAL << "parallel: waiting for the lock";
        unique_lock<mutex> lk(m_mutex);
        DREAL_LOG_FATAL << "parallel: get a lock. " << tasks_to_run.load() << " tasks to go";
        if (tasks_to_run.load() == 0) {
            break;
        }
        DREAL_LOG_FATAL << "parallel: WAIT for CV." << tasks_to_run.load() << " tasks to go";
        m_index = -1;
        m_cv.wait(lk, [&]() { return m_index != -1; });
        DREAL_LOG_FATAL << "parallel: wake up" << tasks_to_run.load();
        pruning_thread_status const & s = statuses[m_index];
        // DREAL_LOG_FATAL << "parallel: thread " << m_index << " " << s;
        if (s == pruning_thread_status::UNSAT || s == pruning_thread_status::EXCEPTION) {
            // Interrupt all the rest threads
            for (unsigned i = 0; i < statuses.size(); i++) {
                if (i - m_index != 0 && (statuses[i] == pruning_thread_status::READY ||
                                         statuses[i] == pruning_thread_status::RUNNING)) {
                    threads[i].interrupt();
                }
            }
            if (s == pruning_thread_status::UNSAT) {
                DREAL_LOG_FATAL << "parallel: " << m_index << " got UNSAT";
                b.set_empty();
                m_input.union_with(m_vec[m_index].input());
                m_output.union_with(m_vec[m_index].output());
                unordered_set<shared_ptr<constraint>> const & used_ctrs = m_vec[m_index].used_constraints();
                m_used_constraints.insert(used_ctrs.begin(), used_ctrs.end());
                lk.unlock();
                for (unsigned i = 0; i < m_vec.size(); i++) {
                    threads[i].join();
                }
                DREAL_LOG_FATAL << "parallel: return UNSAT";
                return;
            }
            if (s == pruning_thread_status::EXCEPTION) {
                DREAL_LOG_FATAL << "parallel: " << m_index << " got EXCEPTION";
                lk.unlock();
                for (unsigned i = 0; i < m_vec.size(); i++) {
                    threads[i].join();
                }
                DREAL_LOG_FATAL << "parallel: throw exception";
                throw contractor_exception("exception during parallel contraction");
            }
        } else {
            // if (s != pruning_thread_status::SAT) {
            //     // DREAL_LOG_FATAL << "parallel: " << m_index << " got " << s;
            //     // DREAL_LOG_FATAL << "parallel: " << m_index << " got " << statuses[m_index];
            assert(s == pruning_thread_status::SAT);
            // }
            // if (threads[m_index].joinable()) {
            //     threads[m_index].join();
            // }
            // DREAL_LOG_FATAL << "parallel: " << m_index << " got SAT";
            // Why?
            //  - Not READY/RUNNING: It's a job already done.
            //  - Not UNSAT/EXCEPTION: already handled above.
            //  - Not KILLED: There must be one which kill the killed
            //                job, and this loop stops after handling
            //                the first one
        }
    }

    // Assertion: All of them got SAT
    // for (pruning_thread_status const & s : statuses) {
    //     assert(s == pruning_thread_status::SAT);
    // }
    // DREAL_LOG_FATAL << "All of them are SAT";
    b = boxes[0];
    for (unsigned i = 0; i < m_vec.size(); i++) {
        contractor const & c = m_vec[i];
        b.intersect(boxes[i]);
        m_input.union_with(c.input());
        m_output.union_with(c.output());
        unordered_set<shared_ptr<constraint>> const & used_ctrs = c.used_constraints();
        m_used_constraints.insert(used_ctrs.begin(), used_ctrs.end());
        if (b.is_empty()) {
            // DREAL_LOG_FATAL << "Found an empty while intersecting...";
            for (unsigned i = 0; i < m_vec.size(); i++) {
                // if (threads[i].joinable()) {
                //     DREAL_LOG_FATAL << "Try to join " << i << "...";
                threads[i].join();
                //     DREAL_LOG_FATAL << "Try to join " << i << "... done";
                // }
            }
            // DREAL_LOG_FATAL << "parallel: return UNSAT";
            return;
        }
    }
    // DREAL_LOG_FATAL << "Intersection is nonempty exiting...";
    for (unsigned i = 0; i < m_vec.size(); i++) {
        // if (threads[i].joinable()) {
        threads[i].join();
        // }
    }
    // DREAL_LOG_FATAL << "parallel: return SAT";
    return;
}
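// The prune() above follows a "first failure wins, otherwise intersect everything"
// scheme: each worker contracts its own copy of the box, the coordinator sleeps on
// a condition variable, and the first UNSAT or exception interrupts the remaining
// workers. The sketch below shows only that coordination skeleton, using plain
// std::thread and an atomic stop flag in place of dReal's interruptible_thread;
// worker_prune, Result, and the hard-coded outcomes are hypothetical.
#include <atomic>
#include <condition_variable>
#include <cstdio>
#include <functional>
#include <mutex>
#include <thread>
#include <vector>

enum class Result { RUNNING, SAT, UNSAT };   // hypothetical worker outcome

// Hypothetical worker: prunes its private copy of the problem and reports back.
void worker_prune(int id, std::atomic<bool>& stop, Result& out,
                  std::mutex& m, std::condition_variable& cv,
                  int& ready_index, std::atomic<int>& remaining)
{
    Result r = (id == 1) ? Result::UNSAT : Result::SAT;  // stand-in for real pruning work
    if (stop.load()) r = Result::SAT;                    // cooperative cancellation point
    {
        std::lock_guard<std::mutex> lk(m);
        out = r;
        ready_index = id;       // tell the coordinator which slot just finished
        --remaining;
    }
    cv.notify_one();
}

int main()
{
    const int n = 4;
    std::mutex m;
    std::condition_variable cv;
    std::vector<Result> results(n, Result::RUNNING);
    std::atomic<bool> stop{false};
    std::atomic<int> remaining{n};
    int ready_index = -1;

    std::vector<std::thread> threads;
    for (int i = 0; i < n; ++i)
        threads.emplace_back(worker_prune, i, std::ref(stop), std::ref(results[i]),
                             std::ref(m), std::ref(cv), std::ref(ready_index),
                             std::ref(remaining));

    bool unsat = false;
    while (true)
    {
        std::unique_lock<std::mutex> lk(m);
        if (remaining.load() == 0) break;   // every worker has reported
        ready_index = -1;
        cv.wait(lk, [&] { return ready_index != -1; });

        // Inspect every finished slot, not only the most recent reporter.
        for (int i = 0; i < n; ++i)
            if (results[i] == Result::UNSAT) unsat = true;
        if (unsat)
        {
            stop.store(true);   // first failure wins: ask the others to wind down
            break;
        }
    }

    for (auto& t : threads) t.join();
    std::printf("%s\n", unsat ? "UNSAT: one worker emptied its box"
                              : "SAT: intersect the per-worker boxes here");
    return 0;
}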
void test_bulk_route_set()
{
    SWSS_LOG_ENTER();

    swss::Logger::getInstance().setMinPrio(swss::Logger::SWSS_NOTICE);

    clearDB();
    meta_init_db();
    redis_clear_switch_ids();

    swss::Logger::getInstance().setMinPrio(swss::Logger::SWSS_DEBUG);

    sai_status_t status;

    sai_route_api_t *sai_route_api = NULL;
    sai_switch_api_t *sai_switch_api = NULL;

    sai_api_query(SAI_API_ROUTE, (void**)&sai_route_api);
    sai_api_query(SAI_API_SWITCH, (void**)&sai_switch_api);

    uint32_t count = 3;

    std::vector<sai_route_entry_t> routes;
    std::vector<sai_attribute_t> attrs;

    uint32_t index = 15;

    sai_attribute_t swattr;
    swattr.id = SAI_SWITCH_ATTR_INIT_SWITCH;
    swattr.value.booldata = true;

    sai_object_id_t switch_id;
    status = sai_switch_api->create_switch(&switch_id, 1, &swattr);
    ASSERT_SUCCESS("Failed to create switch");

    std::vector<std::vector<sai_attribute_t>> route_attrs;
    std::vector<sai_attribute_t *> route_attrs_array;
    std::vector<uint32_t> route_attrs_count;

    for (uint32_t i = index; i < index + count; ++i)
    {
        sai_route_entry_t route_entry;

        // virtual router
        sai_object_id_t vr = create_dummy_object_id(SAI_OBJECT_TYPE_VIRTUAL_ROUTER);
        object_reference_insert(vr);
        sai_object_meta_key_t meta_key_vr = { .objecttype = SAI_OBJECT_TYPE_VIRTUAL_ROUTER, .objectkey = { .key = { .object_id = vr } } };
        std::string vr_key = sai_serialize_object_meta_key(meta_key_vr);
        ObjectAttrHash[vr_key] = { };

        // next hop
        sai_object_id_t hop = create_dummy_object_id(SAI_OBJECT_TYPE_NEXT_HOP);
        object_reference_insert(hop);
        sai_object_meta_key_t meta_key_hop = { .objecttype = SAI_OBJECT_TYPE_NEXT_HOP, .objectkey = { .key = { .object_id = hop } } };
        std::string hop_key = sai_serialize_object_meta_key(meta_key_hop);
        ObjectAttrHash[hop_key] = { };

        route_entry.destination.addr_family = SAI_IP_ADDR_FAMILY_IPV4;
        route_entry.destination.addr.ip4 = htonl(0x0a000000 | i);
        route_entry.destination.mask.ip4 = htonl(0xffffffff);
        route_entry.vr_id = vr;
        route_entry.switch_id = switch_id;
        route_entry.destination.addr_family = SAI_IP_ADDR_FAMILY_IPV4;

        routes.push_back(route_entry);

        std::vector<sai_attribute_t> list(2);
        sai_attribute_t &attr1 = list[0];
        sai_attribute_t &attr2 = list[1];

        attr1.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID;
        attr1.value.oid = hop;
        attr2.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION;
        attr2.value.s32 = SAI_PACKET_ACTION_FORWARD;

        route_attrs.push_back(list);
        route_attrs_count.push_back(2);
    }

    for (size_t j = 0; j < route_attrs.size(); j++)
    {
        route_attrs_array.push_back(route_attrs[j].data());
    }

    std::vector<sai_status_t> statuses(count);

    status = sai_bulk_create_route_entry(count, routes.data(), route_attrs_count.data(), route_attrs_array.data(),
            SAI_BULK_OP_TYPE_INGORE_ERROR, statuses.data());
    ASSERT_SUCCESS("Failed to create route");

    for (size_t j = 0; j < statuses.size(); j++)
    {
        status = statuses[j];
        ASSERT_SUCCESS("Failed to create route # %zu", j);
    }

    for (uint32_t i = index; i < index + count; ++i)
    {
        sai_attribute_t attr;
        attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION;
        attr.value.s32 = SAI_PACKET_ACTION_DROP;

        status = sai_route_api->set_route_entry_attribute(&routes[i - index], &attr);
        attrs.push_back(attr);
        ASSERT_SUCCESS("Failed to set route");
    }

    statuses.clear();
    statuses.resize(attrs.size());

    for (auto &attr: attrs)
    {
        attr.value.s32 = SAI_PACKET_ACTION_FORWARD;
    }

    status = sai_bulk_set_route_entry_attribute(count, routes.data(), attrs.data(),
            SAI_BULK_OP_TYPE_INGORE_ERROR, statuses.data());
    ASSERT_SUCCESS("Failed to bulk set route");

    for (auto s: statuses)
    {
        status = s;
        ASSERT_SUCCESS("Failed to bulk set route on one of the routes");
    }

    // TODO: we need to add a consumer/producer test here to verify
    // that, after consume/pop, we get the expected parameters

    // Remove route entry
    status = sai_bulk_remove_route_entry(count, routes.data(), SAI_BULK_OP_TYPE_INGORE_ERROR, statuses.data());
    ASSERT_SUCCESS("Failed to bulk remove route entry");
}
void test_bulk_fdb_create()
{
    SWSS_LOG_ENTER();

    swss::Logger::getInstance().setMinPrio(swss::Logger::SWSS_NOTICE);

    clearDB();
    meta_init_db();
    redis_clear_switch_ids();

    swss::Logger::getInstance().setMinPrio(swss::Logger::SWSS_DEBUG);

    sai_status_t status;

    sai_fdb_api_t *sai_fdb_api = NULL;
    sai_switch_api_t *sai_switch_api = NULL;

    sai_api_query(SAI_API_FDB, (void**)&sai_fdb_api);
    sai_api_query(SAI_API_SWITCH, (void**)&sai_switch_api);

    uint32_t count = 3;

    std::vector<sai_fdb_entry_t> fdbs;

    uint32_t index = 15;

    sai_attribute_t swattr;
    swattr.id = SAI_SWITCH_ATTR_INIT_SWITCH;
    swattr.value.booldata = true;

    sai_object_id_t switch_id;
    status = sai_switch_api->create_switch(&switch_id, 1, &swattr);
    ASSERT_SUCCESS("Failed to create switch");

    std::vector<std::vector<sai_attribute_t>> fdb_attrs;
    std::vector<sai_attribute_t *> fdb_attrs_array;
    std::vector<uint32_t> fdb_attrs_count;

    for (uint32_t i = index; i < index + count; ++i)
    {
        // virtual router
        sai_object_id_t vr = create_dummy_object_id(SAI_OBJECT_TYPE_VIRTUAL_ROUTER);
        object_reference_insert(vr);
        sai_object_meta_key_t meta_key_vr = { .objecttype = SAI_OBJECT_TYPE_VIRTUAL_ROUTER, .objectkey = { .key = { .object_id = vr } } };
        std::string vr_key = sai_serialize_object_meta_key(meta_key_vr);
        ObjectAttrHash[vr_key] = { };

        // bridge port
        sai_object_id_t bridge_port = create_dummy_object_id(SAI_OBJECT_TYPE_BRIDGE_PORT);
        object_reference_insert(bridge_port);
        sai_object_meta_key_t meta_key_bridge_port = { .objecttype = SAI_OBJECT_TYPE_BRIDGE_PORT, .objectkey = { .key = { .object_id = bridge_port } } };
        std::string bridge_port_key = sai_serialize_object_meta_key(meta_key_bridge_port);
        ObjectAttrHash[bridge_port_key] = { };

        sai_fdb_entry_t fdb_entry;
        fdb_entry.switch_id = switch_id;
        memset(fdb_entry.mac_address, 0, sizeof(sai_mac_t));
        fdb_entry.mac_address[0] = 0xD;
        fdb_entry.bridge_type = SAI_FDB_ENTRY_BRIDGE_TYPE_1Q;
        fdb_entry.vlan_id = (unsigned short)(1011 + i);
        fdb_entry.bridge_id = SAI_NULL_OBJECT_ID;

        fdbs.push_back(fdb_entry);

        std::vector<sai_attribute_t> attrs;
        sai_attribute_t attr;

        attr.id = SAI_FDB_ENTRY_ATTR_TYPE;
        attr.value.s32 = (i % 2) ? SAI_FDB_ENTRY_TYPE_DYNAMIC : SAI_FDB_ENTRY_TYPE_STATIC;
        attrs.push_back(attr);

        attr.id = SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID;
        attr.value.oid = bridge_port;
        attrs.push_back(attr);

        attr.id = SAI_FDB_ENTRY_ATTR_PACKET_ACTION;
        attr.value.s32 = SAI_PACKET_ACTION_FORWARD;
        attrs.push_back(attr);

        fdb_attrs.push_back(attrs);
        fdb_attrs_count.push_back((unsigned int)attrs.size());
    }

    for (size_t j = 0; j < fdb_attrs.size(); j++)
    {
        fdb_attrs_array.push_back(fdb_attrs[j].data());
    }

    std::vector<sai_status_t> statuses(count);

    status = sai_bulk_create_fdb_entry(count, fdbs.data(), fdb_attrs_count.data(), fdb_attrs_array.data(),
            SAI_BULK_OP_TYPE_INGORE_ERROR, statuses.data());
    ASSERT_SUCCESS("Failed to create fdb");

    for (size_t j = 0; j < statuses.size(); j++)
    {
        status = statuses[j];
        ASSERT_SUCCESS("Failed to create fdb # %zu", j);
    }

    // Remove fdb entry
    status = sai_bulk_remove_fdb_entry(count, fdbs.data(), SAI_BULK_OP_TYPE_INGORE_ERROR, statuses.data());
    ASSERT_SUCCESS("Failed to bulk remove fdb entry");
}
void test_bulk_next_hop_group_member_create()
{
    SWSS_LOG_ENTER();

    swss::Logger::getInstance().setMinPrio(swss::Logger::SWSS_NOTICE);

    clearDB();
    meta_init_db();
    redis_clear_switch_ids();

    auto consumerThreads = new std::thread(bulk_nhgm_consumer_worker);

    swss::Logger::getInstance().setMinPrio(swss::Logger::SWSS_DEBUG);

    sai_status_t status;

    sai_next_hop_group_api_t *sai_next_hop_group_api = NULL;
    sai_switch_api_t *sai_switch_api = NULL;

    sai_api_query(SAI_API_NEXT_HOP_GROUP, (void**)&sai_next_hop_group_api);
    sai_api_query(SAI_API_SWITCH, (void**)&sai_switch_api);

    uint32_t count = 3;

    std::vector<sai_route_entry_t> routes;
    std::vector<sai_attribute_t> attrs;

    sai_attribute_t swattr;
    swattr.id = SAI_SWITCH_ATTR_INIT_SWITCH;
    swattr.value.booldata = true;

    sai_object_id_t switch_id;
    status = sai_switch_api->create_switch(&switch_id, 1, &swattr);
    ASSERT_SUCCESS("Failed to create switch");

    std::vector<std::vector<sai_attribute_t>> nhgm_attrs;
    std::vector<sai_attribute_t *> nhgm_attrs_array;
    std::vector<uint32_t> nhgm_attrs_count;

    // next hop group
    sai_object_id_t hopgroup = create_dummy_object_id(SAI_OBJECT_TYPE_NEXT_HOP_GROUP);
    object_reference_insert(hopgroup);
    sai_object_meta_key_t meta_key_hopgroup = { .objecttype = SAI_OBJECT_TYPE_NEXT_HOP_GROUP, .objectkey = { .key = { .object_id = hopgroup } } };
    std::string hopgroup_key = sai_serialize_object_meta_key(meta_key_hopgroup);
    ObjectAttrHash[hopgroup_key] = { };
    sai_object_id_t hopgroup_vid = translate_rid_to_vid(hopgroup, switch_id);

    for (uint32_t i = 0; i < count; ++i)
    {
        // next hop
        sai_object_id_t hop = create_dummy_object_id(SAI_OBJECT_TYPE_NEXT_HOP);
        object_reference_insert(hop);
        sai_object_meta_key_t meta_key_hop = { .objecttype = SAI_OBJECT_TYPE_NEXT_HOP, .objectkey = { .key = { .object_id = hop } } };
        std::string hop_key = sai_serialize_object_meta_key(meta_key_hop);
        ObjectAttrHash[hop_key] = { };
        sai_object_id_t hop_vid = translate_rid_to_vid(hop, switch_id);

        std::vector<sai_attribute_t> list(2);
        sai_attribute_t &attr1 = list[0];
        sai_attribute_t &attr2 = list[1];

        attr1.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID;
        attr1.value.oid = hopgroup_vid;
        attr2.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID;
        attr2.value.oid = hop_vid;

        nhgm_attrs.push_back(list);
        nhgm_attrs_count.push_back(2);
    }

    for (size_t j = 0; j < nhgm_attrs.size(); j++)
    {
        nhgm_attrs_array.push_back(nhgm_attrs[j].data());
    }

    std::vector<sai_status_t> statuses(count);
    std::vector<sai_object_id_t> object_id(count);

    status = sai_bulk_create_next_hop_group_members(switch_id, count, nhgm_attrs_count.data(), nhgm_attrs_array.data(),
            SAI_BULK_OP_TYPE_INGORE_ERROR, object_id.data(), statuses.data());
    ASSERT_SUCCESS("Failed to bulk create nhgm");

    for (size_t j = 0; j < statuses.size(); j++)
    {
        status = statuses[j];
        ASSERT_SUCCESS("Failed to create nhgm # %zu", j);
    }

    consumerThreads->join();
    delete consumerThreads;

    // check the created nhgm
    for (size_t i = 0; i < created_next_hop_group_member.size(); i++)
    {
        auto& created = created_next_hop_group_member[i];
        auto& created_attrs = std::get<2>(created);

        assert(created_attrs.size() == 2);
        assert(created_attrs[1].value.oid == nhgm_attrs[i][1].value.oid);
    }

    status = sai_bulk_remove_next_hop_group_members(count, object_id.data(), SAI_BULK_OP_TYPE_INGORE_ERROR, statuses.data());
    ASSERT_SUCCESS("Failed to bulk remove nhgm");
}