/* Invoke @cb once per builtin node type, stopping early if @cb returns
 * false. Compiled out entirely when no builtin types were built in.
 * @data is forwarded to both the type-getter functions and @cb. */
SOL_API void
sol_flow_foreach_builtin_node_type(bool (*cb)(void *data, const struct sol_flow_node_type *type), const void *data)
{
#if (SOL_FLOW_BUILTIN_NODE_TYPE_COUNT > 0)
    unsigned int idx;

    SOL_NULL_CHECK(cb);

    /* Each table entry is a function that resolves the node type;
     * the resolved type is then handed to the caller's callback. */
    for (idx = 0; idx < SOL_FLOW_BUILTIN_NODE_TYPE_COUNT; idx++) {
        const struct sol_flow_node_type *(*type_func)(bool (*cb)(void *data, const struct sol_flow_node_type *type), const void *data) = SOL_FLOW_BUILTIN_NODE_TYPE_ALL[idx];
        const struct sol_flow_node_type *type = type_func(cb, data);

        /* A false return from the callback aborts the iteration. */
        if (type && !cb((void *)data, type))
            break;
    }
#endif
}
/* Load a particular status provider.
 *
 * Idle callback (runs exactly once; returns FALSE to deregister).
 * @dir is a heap-allocated path to the module file; ownership is taken
 * and the string is freed before returning. On full success the module
 * handle is attached to the provider object so it is closed when the
 * provider is finalized; on any failure the module (if opened) is
 * closed here instead. */
static gboolean
load_status_provider (gpointer dir)
{
	gchar * provider = dir;
	GModule * module = NULL;
	GType provider_type = 0;
	StatusProvider * sprovider = NULL;
	GType (*type_func) (void);

	if (!g_file_test(provider, G_FILE_TEST_EXISTS))
		goto done;

	g_debug("Loading status provider: %s", provider);

	/* Load the module */
	module = g_module_open(provider, G_MODULE_BIND_LAZY | G_MODULE_BIND_LOCAL);
	if (module == NULL) {
		g_warning("Unable to open module: %s", provider);
		goto done;
	}

	/* Find the status provider's GType */
	if (!g_module_symbol(module, STATUS_PROVIDER_EXPORT_S, (gpointer *)&type_func)) {
		g_warning("Unable to find type symbol in: %s", provider);
		goto done;
	}

	provider_type = type_func();
	if (provider_type == 0) {
		g_warning("Unable to create type from: %s", provider);
		goto done;
	}

	/* Instantiate the status provider */
	sprovider = STATUS_PROVIDER(g_object_new(provider_type, NULL));
	if (sprovider == NULL) {
		g_warning("Unable to build provider from: %s", provider);
		goto done;
	}

	/* On update let's talk to all of them and create the aggregate
	   value to export */
	g_signal_connect(G_OBJECT(sprovider), STATUS_PROVIDER_SIGNAL_STATUS_CHANGED, G_CALLBACK(update_status), NULL);

	/* Attach the module object to the status provider so that when the
	   status provider is free'd the module is closed automatically. */
	g_object_set_data_full(G_OBJECT(sprovider), "status-provider-module", module, module_destroy_in_idle);
	module = NULL; /* ownership transferred; don't close it below */

	status_providers = g_list_prepend(status_providers, sprovider);

	/* Force an update to ensure a consistent state */
	update_status();

done:
	if (module != NULL)
		g_module_close(module);
	g_free(provider);

	return FALSE; /* only call this idle func once */
}
//----------------------------------------------------------------------------
//
// radix-k merge
//
// did: decomposition id
// its: pointers to input/output items, results in first number of output items
// hdrs: pointers to input headers (optional, pass NULL if unnecessary)
// nr: number of rounds
// kv: k vector, radix for each round
// cc: pointer to communicate object
// assign: pointer to assignment object
// merge_func: pointer to merging function
// create_func: pointer to function that creates item
// destroy_func: pointer to function that destroys item
// type_func: pointer to function that creates MPI datatype for item
//
// side effects: allocates output items and array of pointers to them, if
// not reducing in-place
//
// returns: number of output items
//
int Merge::MergeBlocks(int did, char **its, int **hdrs,
                       int nr, int *kv, Comm *cc, Assignment *assign,
                       void (*merge_func)(char **, int *, int, int *),
                       char * (*create_func)(int *),
                       void (*destroy_func)(void *),
                       void (*type_func)(void*, MPI_Datatype*, int *)) {

  int rank, groupsize; // MPI usual
  int gid; // global id of current item block
  int p; // process rank
  MPI_Datatype dtype; // data type
  int ng; // number of groups this process owns
  int nb = assign->NumBlks(); // number of blocks this process owns
  vector<char *> my_its(its, its + nb); // copy of its
  vector<bool> done(nb, false); // done items
  vector<int> root_gids; // distinct gids of root blocks

  // init
  assert(nr > 0 && nr <= DIY_MAX_R); // sanity
  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &groupsize);

  // for all rounds
  for (int r = 0; r < nr; r++){

    int n_recv = 0; // number of received blocks by root
    int partners[kv[r]]; // gids of partners in a group (VLA, size = radix of round)
    root_gids.clear();
    root_gids.reserve(kv[r]);

    // all my current blocks must participate in a round (send or receive)
    for (int b = 0; b < nb; b++) {

      if (!done[b]) { // blocks that survived to this round

        gid = DIY_Gid(did, b);
        // GetPartners fills partners[] and reports whether gid is the
        // group root; the root gid is stored last, at partners[kv[r]-1]
        bool root = GetPartners(kv, r, gid, partners);

        if (!root) { // nonroots post sends of headers and items

          p = assign->Gid2Proc(partners[kv[r] - 1]); // send to the root's process
          if (hdrs)
            type_func(my_its[b], &dtype, hdrs[b]);
          else
            type_func(my_its[b], &dtype, NULL);
          // tag is source block gid
          // dtype_absolute_address presumably means the datatype uses
          // absolute addresses, so the buffer argument is MPI_BOTTOM
          // instead of the item pointer -- NOTE(review): confirm with Comm
          if (hdrs && dtype_absolute_address)
            cc->SendItem((char *)MPI_BOTTOM, hdrs[b], p, gid, &dtype);
          else if (hdrs && !dtype_absolute_address)
            cc->SendItem((char *)my_its[b], hdrs[b], p, gid, &dtype);
          else if (!hdrs && dtype_absolute_address)
            cc->SendItem((char *)MPI_BOTTOM, NULL, p, gid, &dtype);
          else
            cc->SendItem((char *)my_its[b], NULL, p, gid, &dtype);
          MPI_Type_free(&dtype);
          done[b] = true; // nonroot blocks are done after they have been sent

        }

        else { // root posts receives of headers

          root_gids.push_back(partners[kv[r] - 1]); // remember my root gid
          for (int k = 0; k < kv[r] - 1; k++) { // receive the others
            p = assign->Gid2Proc(partners[k]);
            cc->StartRecvItem(p, hdrs);
            n_recv++;
          }

        }

      } // blocks that survived to this round

    } // all my current blocks

    // finish receiving all items
    char *recv_its[n_recv]; // received items
    int recv_gids[n_recv]; // (source) gids of the received items
    int recv_procs[n_recv]; // source proc of each received item
    cc->FinishRecvItemsMerge(recv_its, recv_gids, recv_procs, create_func,
                             type_func);

    // merge each group
    ng = (int)root_gids.size(); // number of groups this process owns
    for (int j = 0; j < ng; j++) {

      vector<char *>reduce_its; // items ready for reduction in a group
      vector<int>reduce_gids; // gids for reduce_its
      reduce_its.reserve(kv[r]);
      reduce_gids.reserve(kv[r]);

      // seed the group with this root's own (local) item
      int lid = assign->Gid2Lid(root_gids[j]);
      reduce_its.push_back(my_its[lid]);
      reduce_gids.push_back(root_gids[j]);
      GetPartners(kv, r, root_gids[j], partners);
      for (int i = 0; i < n_recv; i++) { // collect items for this group
        if (find(partners, partners + kv[r], recv_gids[i]) !=
            partners + kv[r]) {
          reduce_its.push_back(recv_its[i]);
          reduce_gids.push_back(recv_gids[i]);
        }
      }

      // header from root block of merge is used
      if (hdrs)
        merge_func(&reduce_its[0], &reduce_gids[0], kv[r], hdrs[lid]);
      else
        merge_func(&reduce_its[0], &reduce_gids[0], kv[r], NULL);
      // merge result lives in the first slot; it becomes the root's item
      my_its[lid] = reduce_its[0];

    }

    // cleanup: received (nonroot) items were consumed by the merge above
    // (n_recv can only be nonzero when ng is nonzero, since only roots post
    // receives)
    if (ng) {
      for (int i = 0; i < n_recv; i++)
        destroy_func(recv_its[i]);
    }

  } // for all rounds

  // after the loop, ng and root_gids still describe the FINAL round's roots;
  // move results to the front, swapping them rather than copying so that user
  // can free all items without having duplicated pointers that get freed
  // multiple times
  for (int i = 0; i < ng; i++) {
    char *temp = its[i];
    its[i] = my_its[assign->Gid2Lid(root_gids[i])];
    its[assign->Gid2Lid(root_gids[i])] = temp;
  }

  return ng;

}