std::string to_string(const group& x) {
  if (x == invalid_group)
    return "<invalid-group>";
  std::string result = x.get()->module().name();
  result += ":";
  result += x.get()->identifier();
  return result;
}
void local_actor::leave(const group& what) {
  CAF_LOG_TRACE(CAF_TSARG(what));
  if (what == invalid_group) {
    return;
  }
  if (detach(abstract_group::subscription_token{what.ptr()}) > 0) {
    what->unsubscribe(address());
  }
}
bool search_reqs( group gp, const std::string &txt )
{
    return std::any_of( gp.begin(), gp.end(),
    [&]( const typename group::value_type &opts ) {
        return std::any_of( opts.begin(), opts.end(),
        [&]( const typename group::value_type::value_type &e ) {
            return lcmatch( e.to_string(), txt );
        } );
    } );
}
void local_actor::join(const group& what) {
  CAF_LOG_TRACE(CAF_TSARG(what));
  if (what == invalid_group) {
    return;
  }
  abstract_group::subscription_token tk{what.ptr()};
  std::unique_lock<std::mutex> guard{m_mtx};
  if (detach_impl(tk, m_attachables_head, true, true) == 0) {
    auto ptr = what->subscribe(address());
    if (ptr) {
      attach_impl(ptr);
    }
  }
}
/// Causes this actor to subscribe to the group `what`.
/// The group is unsubscribed automatically when the actor finishes execution.
void join(const group& what) {
  CAF_LOG_TRACE(CAF_ARG(what));
  if (what == invalid_group)
    return;
  if (what->subscribe(dptr()->ctrl()))
    subscriptions_.emplace(what);
}
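// A minimal usage sketch of the join()/leave() pair above, assuming a CAF
// actor_system with the default "local" group module; the group name
// "example" and the hello-world message are hypothetical, not taken from
// the snippets in this collection.
#include "caf/all.hpp"

caf::behavior group_member(caf::event_based_actor* self, const caf::group& grp) {
  self->join(grp); // subscribe; messages sent to grp now reach this actor
  return {
    [=](const std::string& what) {
      caf::aout(self) << "received: " << what << std::endl;
    }
  };
}

void group_demo(caf::actor_system& system) {
  auto grp = system.groups().get_local("example"); // publish/subscribe channel
  auto member = system.spawn(group_member, grp);
  caf::scoped_actor self{system};
  self->send(grp, std::string{"hello group"}); // delivered to every subscriber
}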
void local_actor::join(const group& what) {
  CPPA_LOG_TRACE(CPPA_TSARG(what));
  if (what && m_subscriptions.count(what) == 0) {
    CPPA_LOG_DEBUG("join group: " << to_string(what));
    m_subscriptions.insert(std::make_pair(what, what->subscribe(this)));
  }
}
/** \brief ctor
 *
 *  - initialize root node
 */
node_graph(void):
    root_group_(0),
    synth_count_(0),
    group_count_(1),
    node_set(node_set_type::bucket_traits(node_buckets, node_set_bucket_count)),
    generated_id(-2)
{
    root_group_.add_ref();
    node_set.insert(root_group_);
}
error inspect(serializer& f, group& x) {
  std::string mod_name;
  auto ptr = x.get();
  if (!ptr)
    return f(mod_name);
  mod_name = ptr->module().name();
  auto e = f(mod_name);
  return e ? e : ptr->save(f);
}
inline void group::open(group const& other, std::string const& name)
{
    if (hid_ >= 0) {
        throw error("h5xx::group object is already in use");
    }
    if (exists_group(other, name)) {
        hid_ = H5Gopen(other.hid(), name.c_str(), H5P_DEFAULT);
    }
    else {
        hid_t lcpl_id = H5Pcreate(H5P_LINK_CREATE);    // create link creation property list
        H5Pset_create_intermediate_group(lcpl_id, 1);  // enable creation of intermediate groups
        hid_ = H5Gcreate(other.hid(), name.c_str(), lcpl_id, H5P_DEFAULT, H5P_DEFAULT);
        H5Pclose(lcpl_id);                             // release the property list again
    }
    if (hid_ < 0) {
        throw error("creating or opening group \"" + name + "\"");
    }
}
/**
 * return true if group "name" exists in group "grp"
 */
inline bool exists_group(group const& grp, std::string const& name)
{
    hid_t hid = grp.hid();
    H5E_BEGIN_TRY {
        hid = H5Gopen(hid, name.c_str(), H5P_DEFAULT);
        if (hid > 0) {
            H5Gclose(hid);
        }
    } H5E_END_TRY
    return (hid > 0);
}
void h5_write(group g, std::string const& name, std::string const& value) {
  datatype strdatatype = H5Tcopy(H5T_C_S1);
  // alternative: variable-length strings via H5Tset_size(strdatatype, H5T_VARIABLE)
  H5Tset_size(strdatatype, value.size() + 1);
  dataspace space = H5Screate(H5S_SCALAR);
  dataset ds = g.create_dataset(name, strdatatype, space);
  auto err = H5Dwrite(ds, strdatatype, H5S_ALL, H5S_ALL, H5P_DEFAULT, (void*)(value.c_str()));
  if (err < 0)
    TRIQS_RUNTIME_ERROR << "Error writing the string named " << name << " in the group " << g.name();
}
void parallel_for_workitem(const group<Dimensions> &g,
                           ParallelForFunctor f) {
#if defined(_OPENMP) && (!defined(TRISYCL_NO_BARRIER) && !defined(_MSC_VER))
  /* To implement barriers with OpenMP, one thread is created for each
     work-item in the group, so an OpenMP barrier has the same effect as
     an OpenCL barrier executed by the work-items of a work-group.

     The issue is that the parallel_for_workitem() execution is slow even
     when nd_item::barrier() is not used. */
  range<Dimensions> l_r = g.get_nd_range().get_local_range();
  id<Dimensions> id_l_r { l_r };
  auto tot = l_r.size();
  if constexpr (Dimensions == 1) {
#pragma omp parallel for collapse(1) schedule(static) num_threads(tot)
    for (size_t i = 0; i < l_r.get(0); ++i) {
      T_Item index{g.get_nd_range()};
      index.set_local(i);
      index.set_global(index.get_local_id() + id_l_r*g.get_id());
      f(index);
    }
  } else if constexpr (Dimensions == 2) {
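// A hedged sketch of the user-facing SYCL 1.2.1 hierarchical parallelism
// that a detail function like parallel_for_workitem() above ends up
// executing; the kernel name, ranges, and buffer contents are illustrative
// assumptions, not part of the triSYCL snippet.
#include <CL/sycl.hpp>
#include <vector>

void hierarchical_demo() {
  std::vector<int> data(64, 0);
  {
    cl::sycl::buffer<int, 1> buf{data.data(), cl::sycl::range<1>{data.size()}};
    cl::sycl::queue q;
    q.submit([&](cl::sycl::handler& cgh) {
      auto acc = buf.get_access<cl::sycl::access::mode::write>(cgh);
      // 4 work-groups of 16 work-items each
      cgh.parallel_for_work_group<class demo>(
          cl::sycl::range<1>{4}, cl::sycl::range<1>{16},
          [=](cl::sycl::group<1> g) {
            // The work-item lambda runs once per work-item; this is the loop
            // that the OpenMP-based implementation above maps to threads.
            g.parallel_for_work_item([&](cl::sycl::h_item<1> item) {
              acc[item.get_global_id()] =
                  static_cast<int>(item.get_global_id()[0]);
            });
          });
    });
  } // buffer destruction copies the results back into data
}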
void h5_read(group g, std::string const& name, std::string& value) {
  dataset ds = g.open_dataset(name);
  h5::dataspace d_space = H5Dget_space(ds);
  int rank = H5Sget_simple_extent_ndims(d_space);
  if (rank != 0)
    TRIQS_RUNTIME_ERROR << "Reading a string and got rank != 0";
  size_t size = H5Dget_storage_size(ds);
  datatype strdatatype = H5Tcopy(H5T_C_S1);
  H5Tset_size(strdatatype, size);
  // alternative: variable-length strings via H5Tset_size(strdatatype, H5T_VARIABLE)
  std::vector<char> buf(size + 1, 0x00);
  auto err = H5Dread(ds, strdatatype, H5S_ALL, H5S_ALL, H5P_DEFAULT, &buf[0]);
  if (err < 0)
    TRIQS_RUNTIME_ERROR << "Error reading the string named " << name << " in the group " << g.name();
  value = "";
  value.append(&(buf.front()));
}
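// Standalone sketch of the fixed-length string pattern used by h5_write()
// and h5_read() above, written against the plain HDF5 C API; the file name
// "strings.h5" and dataset name "greeting" are illustrative only.
#include <hdf5.h>
#include <string>
#include <vector>

std::string string_roundtrip(const std::string& value) {
  hid_t file = H5Fcreate("strings.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);

  // write: fixed-size C string type sized to the value plus its terminator
  hid_t wtype = H5Tcopy(H5T_C_S1);
  H5Tset_size(wtype, value.size() + 1);
  hid_t space = H5Screate(H5S_SCALAR);
  hid_t ds = H5Dcreate2(file, "greeting", wtype, space,
                        H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
  H5Dwrite(ds, wtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, value.c_str());

  // read back: size the buffer from the on-disk storage size
  size_t size = H5Dget_storage_size(ds);
  hid_t rtype = H5Tcopy(H5T_C_S1);
  H5Tset_size(rtype, size);
  std::vector<char> buf(size + 1, 0x00);
  H5Dread(ds, rtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf.data());

  H5Tclose(rtype);
  H5Dclose(ds);
  H5Sclose(space);
  H5Tclose(wtype);
  H5Fclose(file);
  return std::string(buf.data());
}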
object::object(const group& object_) : object_handle_(object_.native_handle()) { }
~node_graph() { assert(root_group_.child_count() == 0); }
data_callback::data_callback( group& grp ) : group_ptr_( grp.item_mgt() ) , owner_(grp) { }
/** \brief ctor
 *
 *  - initialize root node
 */
node_graph(void):
    node_set(node_set_type::bucket_traits(node_buckets, node_set_bucket_count))
{
    node_set.insert(root_group_);
    root_group_.add_ref();
}
/// Causes this actor to leave the group `what`.
void leave(const group& what) {
  CAF_LOG_TRACE(CAF_ARG(what));
  if (subscriptions_.erase(what) > 0)
    what->unsubscribe(dptr()->ctrl());
}
void unmount(const group& mount_point) { hdf5::unmount(mount_point.native_handle()); }
group mount(const file& mounted_file, const group& mount_point) { return group(hdf5::mount(mounted_file.native_handle(), mount_point.native_handle())); }
HOT successor_container fill_queue_recursive(group & g,
                                             successor_container const & successors_from_parent,
                                             size_t previous_activation_limit)
{
    assert(g.has_synth_children());
    typedef server_node_list::reverse_iterator r_iterator;

    successor_container successors(successors_from_parent);

    size_t children = g.child_count();

    sequential_child_list sequential_children;
    sequential_children.reserve(g.child_synth_count);

    for (r_iterator it = g.child_nodes.rbegin(); it != g.child_nodes.rend(); ++it) {
        server_node & node = *it;

        if (node.is_synth()) {
            r_iterator end_of_node = it;
            --end_of_node; // one element behind the last

            std::size_t node_count = 1;

            // we fill the child nodes in reverse order to an array
            for (;;) {
                sequential_children.push_back(&*it);
                ++it;
                if (it == g.child_nodes.rend())
                    break; // we found the beginning of this group

                if (!it->is_synth())
                    break; // we hit a child group, later we may want to add its nodes, too?

                ++node_count;
            }
            --it; // we iterated one element too far, so we need to go back to the previous element

            assert(sequential_children.size() == node_count);

            auto seq_it = sequential_children.rbegin();

            int activation_limit = get_previous_activation_count(it, g.child_nodes.rend(),
                                                                 previous_activation_limit);

            thread_queue_item * q_item =
                q->allocate_queue_item(queue_node(std::move(queue_node_data(static_cast<abstract_synth*>(*seq_it++))),
                                                  node_count),
                                       successors, activation_limit);

            queue_node & q_node = q_item->get_job();

            // now we can add all nodes sequentially
            for (; seq_it != sequential_children.rend(); ++seq_it)
                q_node.add_node(static_cast<abstract_synth*>(*seq_it));
            sequential_children.clear();

            assert(q_node.size() == node_count);

            /* advance successor list */
            successors = successor_container(1);
            successors[0] = q_item;

            if (activation_limit == 0)
                q->add_initially_runnable(q_item);

            children -= node_count;
        } else {
            abstract_group & grp = static_cast<abstract_group&>(node);

            if (grp.has_synth_children()) {
                int activation_limit = get_previous_activation_count(it, g.child_nodes.rend(),
                                                                     previous_activation_limit);

                successors = fill_queue_recursive(grp, successors, activation_limit);
            }
            children -= 1;
        }
    }
    assert(children == 0);
    return successors;
}
void fill_queue(group & root_group)
{
    if (root_group.has_synth_children())
        fill_queue_recursive(root_group, successor_container(0), 0);
}
void create_world(
    rendering_assets_type& rendering_assets,
    black_label::rendering::view& view )
{
    static group all_statics;

    static auto get_vec3 = [](const boost::property_tree::ptree& root) {
        assert(3 == root.size());
        auto first = root.begin();
        auto x = (*first++).second.get<float>("");
        auto y = (*first++).second.get<float>("");
        auto z = (*first++).second.get<float>("");
        return glm::vec3(x, y, z);
    };

    path scene_file{"scene.json"};
    BOOST_LOG_TRIVIAL(info) << "Importing scene file " << scene_file << "...";

    boost::property_tree::ptree root;
    boost::property_tree::read_json(scene_file.string(), root);

    vector<path> models, dynamics;
    vector<glm::mat4> transformations;
    for (auto entity : root.get_child("entities") | boost::adaptors::map_values)
    {
        auto model = entity.get<string>("model");
        models.emplace_back(move(model));

        auto dynamic = entity.get<string>("dynamic");
        dynamics.emplace_back(move(dynamic));

        glm::mat4 transformation; // Identity matrix
        if (auto transformation_root_ = entity.get_child_optional("transformation"))
            for (auto& transformation_child : *transformation_root_)
            {
                if (auto scale = transformation_child.second.get_optional<float>("scale"))
                    transformation = glm::scale(transformation, glm::vec3{*scale});
                if (auto translation = transformation_child.second.get_child_optional("translate"))
                    transformation = glm::translate(transformation, get_vec3(*translation));
            }
        transformations.emplace_back(transformation);
    }

    all_statics.emplace_back(std::make_shared<entities>(
        models,
        dynamics,
        transformations));

    using rendering_entities = rendering_assets_type::external_entities;
    vector<rendering_entities> asset_data;
    asset_data.reserve(all_statics.size());
    int id{0};
    for (const auto& entity : all_statics)
        asset_data.emplace_back(
            id++,
            boost::make_iterator_range(entity->begin(entity->models), entity->end(entity->models)),
            boost::make_iterator_range(entity->begin(entity->transformations), entity->end(entity->transformations)));
    rendering_assets.add_statics(cbegin(asset_data), cend(asset_data));

    // Camera
    view.eye = get_vec3(root.get_child("camera.eye"));
    view.target = get_vec3(root.get_child("camera.target"));
    view.on_view_moved();
}
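// A hypothetical scene.json illustrating the structure create_world() above
// expects: an "entities" object whose children carry "model", "dynamic",
// and an optional "transformation" list of scale/translate steps, plus a
// "camera" object with "eye" and "target". All names and values here are
// made up for illustration.
constexpr const char* example_scene_json = R"({
    "entities": {
        "floor": {
            "model": "models/floor.fbx",
            "dynamic": "physics/floor.bullet",
            "transformation": [
                { "scale": 10.0 },
                { "translate": [0.0, -1.0, 0.0] }
            ]
        }
    },
    "camera": {
        "eye":    [0.0, 2.0, 5.0],
        "target": [0.0, 0.0, 0.0]
    }
})";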