/**
 * Gather statistics about this simulation step.
 *
 * Returns 1 if one species died, 0 otherwise.
 */
int get_stats(int step, struct StepResult **last_result, struct SimulationResult *result) {
    int num_processes = get_num_processes();
    int died = 0;

    struct StepResult step_result = {0, 0, 0, 0}; // global

    // receive stats about this round
    if(num_processes > 1) {
        struct StepResult local = {0, 0, 0, 0};

        struct StepResult *tmp = calculate_step_result(step);
        memcpy(&local, tmp, sizeof(struct StepResult));
        free(tmp);

        output("%d step_result %d: %d %d %d\n", get_rank(), local.current_step,
               local.amount_predators, local.amount_prey, local.amount_plants);

        MPI_Reduce(&local, &step_result, 1, MPI_Struct_StepResult, MPI_Op_Sum_StepResult,
                   0, MPI_COMM_WORLD);
    } else {
        struct StepResult *tmp = calculate_step_result(step);
        memcpy(&step_result, tmp, sizeof(struct StepResult));
        free(tmp);
    }

    if(get_rank() == 0) {
        step_result.current_step = step;
        step_result.next = 0;

        if(*last_result == 0) {
            *last_result = malloc(sizeof(struct StepResult));
            result->first_step_result = *last_result;
        } else {
            (*last_result)->next = malloc(sizeof(struct StepResult));
            *last_result = (*last_result)->next;
        }

        **last_result = step_result;
        result->operations += step_result.operations;

        printf(" - %d predators / %d prey / %d plants\n",
               step_result.amount_predators, step_result.amount_prey, step_result.amount_plants);

        died = step_result.amount_predators == 0 || step_result.amount_prey == 0;
        if(died) {
            printf("\nOne species died!\n");
        }
    }

    // broadcast to all processes whether one species died
    MPI_Bcast(&died, 1, MPI_INT, 0, MPI_COMM_WORLD);

    return died;
}

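/*
 * The custom MPI datatype and reduction op used above (MPI_Struct_StepResult,
 * MPI_Op_Sum_StepResult) are assumed to be registered elsewhere in this code
 * base. A minimal sketch of how they might be set up, assuming the counter
 * fields of struct StepResult are the ints used above (the exact struct layout
 * is an assumption here):
 */
#include <stddef.h> /* offsetof */

MPI_Datatype MPI_Struct_StepResult;
MPI_Op MPI_Op_Sum_StepResult;

static void sum_step_result(void *in, void *inout, int *len, MPI_Datatype *type) {
    struct StepResult *a = in, *b = inout;
    for(int i = 0; i < *len; i++) {
        b[i].amount_predators += a[i].amount_predators;
        b[i].amount_prey      += a[i].amount_prey;
        b[i].amount_plants    += a[i].amount_plants;
        b[i].operations       += a[i].operations;
    }
}

static void register_step_result_type(void) {
    // describe only the counter fields; offsets come from the real struct
    int blocklens[4] = {1, 1, 1, 1};
    MPI_Aint disps[4] = {
        offsetof(struct StepResult, amount_predators),
        offsetof(struct StepResult, amount_prey),
        offsetof(struct StepResult, amount_plants),
        offsetof(struct StepResult, operations)
    };
    MPI_Datatype types[4] = {MPI_INT, MPI_INT, MPI_INT, MPI_INT};
    MPI_Datatype tmp;
    MPI_Type_create_struct(4, blocklens, disps, types, &tmp);
    // stretch the extent to the full struct size so buffers of StepResult line up
    MPI_Type_create_resized(tmp, 0, sizeof(struct StepResult), &MPI_Struct_StepResult);
    MPI_Type_commit(&MPI_Struct_StepResult);
    MPI_Type_free(&tmp);
    MPI_Op_create(sum_step_result, 1 /* commutative */, &MPI_Op_Sum_StepResult);
}
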
/**
 * Initialises the MPI environment with MPI_Init_thread. pagmo::mpi_environment objects
 * should be created only in the main thread of execution.
 *
 * @throws std::runtime_error if another instance of this class has already been created,
 * or if the MPI implementation does not support at least the MPI_THREAD_SERIALIZED thread
 * level and this is the root node, or if the world size is not at least 2.
 */
mpi_environment::mpi_environment()
{
    if (m_initialised) {
        pagmo_throw(std::runtime_error,"cannot re-initialise the MPI environment");
    }
    m_initialised = true;
    int thread_level_provided;
    MPI_Init_thread(NULL,NULL,MPI_THREAD_MULTIPLE,&thread_level_provided);
    if (thread_level_provided >= MPI_THREAD_MULTIPLE) {
        m_multithread = true;
    }
    if (get_rank()) {
        // If this is a slave, it will have to stop here, listen for jobs, execute them,
        // and exit() when signalled to do so.
        listen();
    }
    // If this is the root node, it will need to be able to call MPI from multiple threads.
    if (thread_level_provided < MPI_THREAD_SERIALIZED && get_rank() == 0) {
        pagmo_throw(std::runtime_error,"the master node must support at least the MPI_THREAD_SERIALIZED thread level");
    }
    // World sizes less than 2 are not allowed.
    if (get_size() < 2) {
        pagmo_throw(std::runtime_error,"the size of the MPI world must be at least 2");
    }
}

template<typename T, typename hasher>
unsigned int DisjointSets<T, hasher>::merge_classes( unsigned int c1, unsigned int c2 ) {
    //get class representatives
    const unsigned int c1_rep = get_rep(c1) ;
    const unsigned int c2_rep = get_rep(c2) ;

    //if the classes are already merged, do nothing
    if(c1_rep == c2_rep) return c1_rep ;

    //merge: get the class ranks
    const unsigned int c1_rank = get_rank(c1_rep) ;
    const unsigned int c2_rank = get_rank(c2_rep) ;

    if(c1_rank < c2_rank) {
        //tree for c2 is deeper, use c2 as root
        set_rep(c1_rep, c2_rep) ;
        return c2_rep ;
    } else if(c2_rank < c1_rank) {
        //tree for c1 is deeper, use c1 as root
        set_rep(c2_rep, c1_rep) ;
        return c1_rep ;
    } else {
        //trees have the same depth, use c1 as root and increase its depth
        set_rep(c2_rep, c1_rep) ;
        set_rank(c1_rep, c1_rank+1) ;
        return c1_rep ;
    }
}

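/*
 * For context: the union-by-rank rule above keeps representative trees shallow
 * by always hanging the shallower tree under the deeper one. A self-contained
 * illustration of the same technique (independent of the DisjointSets<T, hasher>
 * API, whose get_rep/set_rep/get_rank/set_rank helpers are not shown here):
 */
#include <numeric>
#include <utility>
#include <vector>

struct UnionByRank {
    std::vector<unsigned> parent, rank_;

    explicit UnionByRank(unsigned n) : parent(n), rank_(n, 0) {
        std::iota(parent.begin(), parent.end(), 0u); // each element starts as its own class
    }

    unsigned find(unsigned x) {
        while (parent[x] != x) {
            parent[x] = parent[parent[x]]; // path halving keeps lookups near O(1)
            x = parent[x];
        }
        return x;
    }

    unsigned merge(unsigned a, unsigned b) {
        unsigned ra = find(a), rb = find(b);
        if (ra == rb) return ra;                      // already in the same class
        if (rank_[ra] < rank_[rb]) std::swap(ra, rb); // make ra the deeper tree
        parent[rb] = ra;
        if (rank_[ra] == rank_[rb]) ++rank_[ra];      // equal depth: root grows by one
        return ra;
    }
};
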
/*
 * Get the number of nodes < key/value.
 *
 *              S (8)   -> size
 *             / \
 *            /   \
 *           /     \
 *       (4)E       W (3)
 *         / \     / \
 *        /   \   /   \
 *    (2)A     H T     Z (1)
 *        \
 *         C
 *
 * For E -> rank is get_size(A) = 2
 */
int get_rank(struct node *head, int value) {
    if(head == NULL)
        return 0;

    if(value < head->value) {
        return get_rank(head->left, value);
    } else if(value > head->value) {
        return 1 + get_size(head->left) + get_rank(head->right, value);
    } else { // value == head->value
        return get_size(head->left);
    }
}

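/*
 * A small check against the tree pictured above, compiled together with the
 * get_rank above. This sketch assumes a node layout of {value, left, right}
 * and supplies a recursive get_size fallback (real implementations usually
 * cache the size in the node, as the (8)/(4)/(3) annotations suggest); the
 * letters are encoded as their character codes.
 */
#include <stdio.h>
#include <stdlib.h>

struct node { int value; struct node *left, *right; };

// fallback subtree size; O(n) here, O(1) when cached in the node
int get_size(struct node *head) {
    if(head == NULL) return 0;
    return 1 + get_size(head->left) + get_size(head->right);
}

struct node *make(int value, struct node *left, struct node *right) {
    struct node *n = malloc(sizeof *n);
    n->value = value; n->left = left; n->right = right;
    return n;
}

int main(void) {
    // the tree from the comment above
    struct node *root =
        make('S', make('E', make('A', NULL, make('C', NULL, NULL)),
                            make('H', NULL, NULL)),
                  make('W', make('T', NULL, NULL),
                            make('Z', NULL, NULL)));
    printf("rank of E: %d\n", get_rank(root, 'E')); // prints 2: A and C precede E
    return 0;
}
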
static int cmp_results(struct result *a, struct result *b) {
    // lower relevance sorts first; ties are broken by rank, higher rank first
    if (a->relevance < b->relevance)
        return -1;
    else if (a->relevance > b->relevance)
        return 1;
    else if (get_rank(a) > get_rank(b))
        return -1;
    else if (get_rank(a) < get_rank(b))
        return 1;
    else
        return 0;
}

void long_sequence_of_read_at_write_at_operations() {
    MPI_File file;
    int result = MPI_File_open_pmem(global_context->communicator, global_context->file_path,
                                    MPI_MODE_RDWR, create_mpi_info(), &file);
    assert_true(result == MPI_SUCCESS, "MPI_File_open_pmem returned with error");

    int size = 64;
    char* file_fragment = (char*) malloc(size * sizeof(char));

    for (int i=0; i<LONG_SEQUENCE_LENGTH; i++) {
        if (rand() % 2) {
            int location = rand() % (global_context->file_size - strlen(TEXT_TO_WRITE));
            result = MPI_File_write_at_pmem(file, location, TEXT_TO_WRITE, strlen(TEXT_TO_WRITE),
                                            MPI_CHAR, MPI_STATUS_IGNORE);
            assert_true(result == MPI_SUCCESS, "MPI_File_write_at_pmem returned with error");
        } else {
            int location = rand() % (global_context->file_size - size);
            result = MPI_File_read_at_pmem(file, location, file_fragment, size,
                                           MPI_BYTE, MPI_STATUS_IGNORE);
            assert_true(result == MPI_SUCCESS, "MPI_File_read_at_pmem returned with error");
        }
    }
    free(file_fragment);

    result = MPI_File_close_pmem(&file);
    assert_true(result == MPI_SUCCESS, "MPI_File_close_pmem returned with error");

    if (get_rank(global_context->communicator) == 0) {
        assert_true(get_file_size() == global_context->file_size,
                    "File changed its size after read/write operations");
    }
}

void write_at_processes_wrote_correct_bytes_into_overlapping_parts() {
    int rank = get_rank(global_context->communicator);
    int comm_size = get_comm_size();

    MPI_File file;
    int result = MPI_File_open_pmem(global_context->communicator, global_context->file_path,
                                    MPI_MODE_RDWR, create_mpi_info(), &file);
    assert_true(result == MPI_SUCCESS, "MPI_File_open_pmem returned with error");

    // each process writes the same text, shifted by half a text length per rank,
    // so consecutive writes overlap
    int location_modifier = strlen(TEXT_TO_WRITE) / get_comm_size() / 2;
    for (int i=0; i<comm_size; i++) {
        if (rank == i) {
            result = MPI_File_write_at_pmem(file, rank * location_modifier, TEXT_TO_WRITE,
                                            strlen(TEXT_TO_WRITE), MPI_CHAR, MPI_STATUS_IGNORE);
            assert_true(result == MPI_SUCCESS, "MPI_File_write_at_pmem returned with error");
        }
        MPI_Barrier(global_context->communicator);
    }

    result = MPI_File_close_pmem(&file);
    assert_true(result == MPI_SUCCESS, "MPI_File_close_pmem returned with error");

    if (rank == 0) {
        assert_true(get_file_size() == global_context->file_size,
                    "File changed its size after read/write operations");

        char* final_text = (char*) malloc(strlen(TEXT_TO_WRITE) * get_comm_size());
        for (int i=0; i<get_comm_size(); i++) {
            strcpy(final_text + i * location_modifier, TEXT_TO_WRITE);
        }
        char* c_read_result = c_read_at(0, strlen(final_text));
        assert_mem_equals(final_text, c_read_result, strlen(final_text),
                          "Function wrote incorrect bytes");
    }
    share_message_errors();
}

struct SimulationResult* run_simulation() {
    int rank = get_rank();
    int num_processes = get_num_processes();

    struct SimulationResult *result = 0;
    struct StepResult *last_result = 0;

    // only the master process collects the simulation results
    if(rank == 0) {
        result = malloc(sizeof(struct SimulationResult));
        result->operations = 0;

        // set start time
        gettimeofday(&result->start_time, NULL);
    }

    init_map();

    MPI_Barrier(MPI_COMM_WORLD);

    int i = 0;
    int died = 0;

    if(rank == 0) {
        printf("Start Population\n");
    }
    died = get_stats(i, &last_result, result);

    while(i < MAX_SIMULATION_STEPS && !died) {
        i++;

        if(rank == 0)
            printf("Simulation Step %d\n", i);

        simulation_step(i);
        died = get_stats(i, &last_result, result);

        if(num_processes == 1) {
            print_bitmap(i);
        }
    }

    MPI_Barrier(MPI_COMM_WORLD);

    if(rank == 0) {
        gettimeofday(&result->finish_time, NULL);
        calc_runtime(result);
    }

    return result;
}

static void pseudo_anchor_sort(UInt32 *a, Int32 n, Int32 pseudo_anchor_pos, Int32 offset)
{
    Int32 get_rank(Int32);
    Int32 get_rank_update_anchors(Int32);
    Int32 pseudo_anchor_rank;

    /* ---------- compute rank ------------ */
    if(Update_anchor_ranks!=0 && Anchor_dist>0)
        pseudo_anchor_rank = get_rank_update_anchors(pseudo_anchor_pos);
    else
        pseudo_anchor_rank = get_rank(pseudo_anchor_pos);

    /* ---------- check rank -------------- */
    assert(Sa[pseudo_anchor_rank] == (UInt32) pseudo_anchor_pos);

    /* ---------- do the sorting ---------- */
    general_anchor_sort(a, n, pseudo_anchor_pos, pseudo_anchor_rank, offset);
}

void import() {
    unique_ptr<Driver> driver(Driver::create_driver(FLAGS_driver));
    Status status = driver->connect();
    if (!status.ok()) {
        LOG(ERROR) << "Failed to connect: " << status.message();
        return;
    }

    auto num_clients = get_total_clients();
    int num_files = FLAGS_records_per_index;
    vector<string> files;
    if (num_clients > 0) {
        // split the files evenly across clients; this client takes its contiguous slice
        num_files /= num_clients;
        files.reserve(num_files);
        int my_rank = get_rank();
        LOG(ERROR) << "my rank: " << my_rank << " total: " << num_clients;
        for (int i = my_rank * num_files; i < (my_rank + 1) * num_files; i++) {
            files.emplace_back(FLAGS_path + "/file-" + lexical_cast<string>(i));
        }
        LOG(ERROR) << "Insert from " << *files.begin() << " to " << files.back();
    } else {
        files.reserve(FLAGS_records_per_index);
        for (int i = 0; i < FLAGS_records_per_index; i++) {
            files.emplace_back(FLAGS_path + "/file-" + lexical_cast<string>(i));
        }
    }
    driver->import(files);
}

void CUIMpTradeWnd::SetCurrentItem(CUICellItem* itm)
{
    if(m_pCurrentCellItem == itm) return;
    m_pCurrentCellItem = itm;
    m_item_info->InitItem(CurrentIItem());

    if (m_pCurrentCellItem)
    {
        const shared_str& current_sect_name = CurrentIItem()->object().cNameSect();

        string256 str;
        sprintf_s(str, "%d", GetItemPrice(CurrentIItem()));
        m_item_info->UICost->SetText(str);
        m_item_info->UIName->SetText(CurrentIItem()->NameShort());

        string64 tex_name;
        string64 team;
        if (m_store_hierarchy->FindItem(current_sect_name))
        { // our team
            strcpy_s(team, _team_names[m_store_hierarchy->TeamIdx()]);
        }
        else
        {
            strcpy_s(team, _team_names[m_store_hierarchy->TeamIdx()%1]);
        }
        sprintf_s(tex_name, "ui_hud_status_%s_0%d", team, 1+get_rank(current_sect_name.c_str()));

        m_static_item_rank->InitTexture(tex_name);
        m_static_item_rank->TextureOn();
    }
    else
    {
        m_static_item_rank->TextureOff();
    }
}

/* init_sres initializes libSRES functionality, including population data, generations, ranges, etc.
    parameters:
        ip: the program's input parameters
        sp: parameters required by libSRES
    returns: nothing
    notes:
        Excuse the awful variable names. They are named according to libSRES conventions for the
        sake of consistency. Many of the parameters required by libSRES are not configurable via
        the command line because they haven't needed to be changed, but this does not mean they
        aren't significant.
    todo:
*/
void init_sres (input_params& ip, sres_params& sp) {
    // Initialize parameters required by libSRES
    int es = esDefESSlash;
    int constraint = 0;
    int dim = ip.num_dims;
    int miu = ip.pop_parents;
    int lambda = ip.pop_total;
    int gen = ip.generations;
    double gamma = esDefGamma;
    double alpha = esDefAlpha;
    double varphi = esDefVarphi;
    int retry = 0;
    sp.pf = essrDefPf;

    // Transform is a dummy function f(x)->x but is still required to fit libSRES's code structure
    sp.trsfm = (ESfcnTrsfm*)mallocate(sizeof(ESfcnTrsfm) * dim);
    for (int i = 0; i < dim; i++) {
        sp.trsfm[i] = transform;
    }

    // Call libSRES's initialize function
    int rank = get_rank();
    ostream& v = term->verbose();
    if (rank == 0) {
        cout << term->blue << "Running libSRES initialization simulations " << term->reset << ". . . ";
        cout.flush();
        v << endl;
    }
    ESInitial(ip.seed, &(sp.param), sp.trsfm, fitness, es, constraint, dim, sp.ub, sp.lb,
              miu, lambda, gen, gamma, alpha, varphi, retry, &(sp.population), &(sp.stats));
    if (rank == 0) {
        cout << term->blue << "Done";
        v << " with libSRES initialization simulations";
        cout << term->reset << endl;
    }
}

/**
 * \brief NCCL implementation of \ref gpucomm_broadcast.
 */
static int broadcast(gpudata *array, size_t offset, size_t count, int typecode,
                     int root, gpucomm *comm) {
  // need dummy init so that compiler shuts up
  ncclDataType_t datatype = ncclNumTypes;
  int rank = 0;
  cuda_context *ctx;

  ASSERT_BUF(array);
  ASSERT_COMM(comm);
  GA_CHECK(check_restrictions(array, offset, NULL, 0, count, typecode, 0, comm,
                              &datatype, NULL));
  GA_CHECK(get_rank(comm, &rank));

  ctx = comm->ctx;
  cuda_enter(ctx);

  // sync: wait till a write has finished (out of concurrent kernels)
  if (rank == root)
    GA_CUDA_EXIT_ON_ERROR(ctx, cuda_wait(array, CUDA_WAIT_READ));
  else
    GA_CUDA_EXIT_ON_ERROR(ctx, cuda_wait(array, CUDA_WAIT_WRITE));

  // change stream of nccl ops to enable concurrency
  NCCL_EXIT_ON_ERROR(ctx, ncclBcast((void *)(array->ptr + offset), count,
                                    datatype, root, comm->c, ctx->s));

  if (rank == root)
    GA_CUDA_EXIT_ON_ERROR(ctx, cuda_record(array, CUDA_WAIT_READ));
  else
    GA_CUDA_EXIT_ON_ERROR(ctx, cuda_record(array, CUDA_WAIT_WRITE));

  cuda_exit(ctx);
  return GA_NO_ERROR;
}

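/*
 * The get_rank(comm, &rank) helper used here is not shown in this excerpt;
 * a plausible implementation on top of NCCL's own query, assuming comm->c is
 * the ncclComm_t handle as in the ncclBcast call above (a sketch, not the
 * library's actual code):
 */
static int get_rank(gpucomm *comm, int *rank) {
  if (ncclCommUserRank(comm->c, rank) != ncclSuccess)
    return GA_COMM_ERROR;  // error code assumed for this sketch
  return GA_NO_ERROR;
}
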
bool CAI_Stalker::conflicted(const CInventoryItem *item, const CWeapon *new_weapon,
                             bool new_weapon_enough_ammo, int new_weapon_rank) const
{
    if (non_conflicted(item, new_weapon))
        return (false);

    const CWeapon *weapon = smart_cast<const CWeapon*>(item);
    VERIFY(weapon);

    bool current_weapon_enough_ammo = enough_ammo(weapon);
    if (current_weapon_enough_ammo && !new_weapon_enough_ammo)
        return (true);

    if (!current_weapon_enough_ammo && new_weapon_enough_ammo)
        return (false);

    if (!fsimilar(weapon->GetCondition(), new_weapon->GetCondition(), .05f))
        return (weapon->GetCondition() >= new_weapon->GetCondition());

    if (weapon->ef_weapon_type() != new_weapon->ef_weapon_type())
        return (weapon->ef_weapon_type() >= new_weapon->ef_weapon_type());

    u32 weapon_rank = get_rank(weapon->cNameSect());
    if (weapon_rank != (u32)new_weapon_rank)
        return (weapon_rank >= (u32)new_weapon_rank);

    return (true);
}

/**
 * Draws the card to the screen at its location.
 */
void GraphicalCard::draw()
{
    // default spritesheet cell, used when the card is face down
    int x = 2, y = 4;
    if (is_faceup())
    {
        x = get_rank() - 1;
        y = get_suit();
    }
    draw_bitmap_part(_spritesheet, x*CARD_WIDTH, y*CARD_HEIGHT,
                     CARD_WIDTH, CARD_HEIGHT, _position.x, _position.y);
}

bool event::operator < (const event & rhs) const
{
    // order primarily by timestamp; fall back to rank for simultaneous events
    if (m_timestamp == rhs.m_timestamp)
        return get_rank() < rhs.get_rank();
    else
        return m_timestamp < rhs.m_timestamp;
}

void send_delete_brush(int i)
{
    if (get_rank() == 0 && free_brush(i))
    {
        send_event(EVENT_DELETE_BRUSH);
        send_index(i);
    }
}

bool ValueFilter::is_ordered() const
{
    if (get_rank() == 2) {
        // with both an upper and a lower bound set, the interval must be non-empty
        double hi = mask & (1 << LT) ? thresholds[LT] : thresholds[LE];
        double lo = mask & (1 << GT) ? thresholds[GT] : thresholds[GE];
        return lo < hi;
    }
    return true;
}

/* Print the numbers ranked 2nd */
void print_2nd( int a[] )
{
    int i;

    for ( i = 0; i < SIZE; i++ ){
        if ( get_rank(a, i) == 2 ){
            printf("%d is ranked 2nd\n", a[i]);
        }
    }
}

bool ReplicaExchange::do_exchange(double myscore0, double myscore1, int findex)
{
    double myscore = myscore0 - myscore1;
    double fscore;
    int myindex = index_[myrank_];
    int frank = get_rank(findex);

    MPI_Sendrecv(&myscore, 1, MPI_DOUBLE, frank, myrank_,
                 &fscore, 1, MPI_DOUBLE, frank, frank,
                 MPI_COMM_WORLD, &status_);

    bool do_accept = get_acceptance(myscore, fscore);

    boost::scoped_array<int> sdel(new int[nproc_ - 1]);
    boost::scoped_array<int> rdel(new int[nproc_ - 1]);
    for(int i = 0; i < nproc_ - 1; ++i) { sdel[i] = 0; }

    if(do_accept) {
        std::map<std::string, Floats>::iterator it;
        for (it = parameters_.begin(); it != parameters_.end(); it++) {
            Floats param = get_friend_parameter((*it).first, findex);
            set_my_parameter((*it).first, param);
        }
        // update the increment vector only for replicas that moved to a higher
        // temperature, to avoid double counting (excluding the transition 0 -> nrep-1)
        int delindex = findex - myindex;
        if (delindex == 1) {
            //std::cout << myindex << " " << findex << " " << std::endl;
            sdel[myindex] = 1;
        }
        // update the index
        myindex = findex;
    }

    MPI_Barrier(MPI_COMM_WORLD);

    // gather the increment vector from all replicas and copy it to the exchange array
    MPI_Allreduce(sdel.get(), rdel.get(), nproc_ - 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    for(int i = 0; i < nproc_ - 1; ++i) { exarray_[i] = rdel[i]; }

    // in any case, update the index vector
    boost::scoped_array<int> sbuf(new int[nproc_]);
    boost::scoped_array<int> rbuf(new int[nproc_]);
    for(int i = 0; i < nproc_; ++i) { sbuf[i] = 0; }
    sbuf[myrank_] = myindex;
    MPI_Allreduce(sbuf.get(), rbuf.get(), nproc_, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    for(int i = 0; i < nproc_; ++i) { index_[i] = rbuf[i]; }

    return do_accept;
}

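/*
 * get_rank(findex) is not shown in this excerpt. Given that index_[rank] holds
 * the replica index currently assigned to each MPI rank (as maintained above),
 * a plausible implementation is a reverse lookup (a sketch under that
 * assumption, not necessarily the library's actual code):
 */
int ReplicaExchange::get_rank(int index)
{
    for(int rank = 0; rank < nproc_; ++rank) {
        if(index_[rank] == index) return rank;
    }
    return -1; // unknown replica index
}
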
/* run_sres iterates through every specified generation of libSRES
    parameters:
        sp: parameters required by libSRES
    returns: nothing
    notes:
    todo:
*/
void run_sres (sres_params& sp) {
    int rank = get_rank();
    while (sp.stats->curgen < sp.param->gen) {
        int cur_gen = sp.stats->curgen;
        if (rank == 0) {
            cout << term->blue << "Starting generation " << term->reset << cur_gen << " . . ." << endl;
        }
        ESStep(sp.population, sp.param, sp.stats, sp.pf);
        if (rank == 0) {
            cout << term->blue << "Done with generation " << term->reset << cur_gen << endl;
        }
    }
}

/**
 * Will send a shutdown signal to all processes with nonzero rank and call MPI_Finalize().
 */
mpi_environment::~mpi_environment()
{
    // In theory this should never be called by the slaves.
    pagmo_assert(!get_rank());
    pagmo_assert(m_initialised);
    std::pair<boost::shared_ptr<population>, algorithm::base_ptr> shutdown_payload;
    for (int i = 1; i < get_size(); ++i) {
        // Send the shutdown signal to all slaves.
        send(shutdown_payload, i);
    }
    MPI_Finalize();
    m_initialised = false;
}

int main (int argc, char **argv)
{
    flux_t h;
    flux_rpc_t *rpc;

    if (!(h = flux_open (NULL, 0)))
        err_exit ("flux_open");
    if (!(rpc = flux_rpc (h, "cmb.info", NULL, FLUX_NODEID_ANY, 0)))
        err_exit ("flux_rpc");
    get_rank (rpc);
    flux_rpc_destroy (rpc);
    flux_close (h);
    return (0);
}

Int32 get_rank_update_anchors(Int32 pos)
{
    Int32 get_rank(Int32 pos);
    Int32 sb, lo, hi, j, toffset, aoffset, anchor, rank;

    assert(Anchor_dist > 0);

    // --- get bucket and verify it is a sorted one
    sb = Get_small_bucket(pos);
    if(!(IS_SORTED_BUCKET(sb))) {
        fprintf(stderr, "Illegal call to get_rank! (get_rank_update_anchors)\n");
        exit(1);
    }

    // --- if the bucket has already been ranked, just compute the rank
    if(bucket_ranked[sb])
        return get_rank(pos);

    // --- rank the whole bucket
    bucket_ranked[sb] = 1;
    rank = -1;
    lo = BUCKET_FIRST(sb);
    hi = BUCKET_LAST(sb);
    for(j = lo; j <= hi; j++) {
        // see if we can update an anchor
        toffset = Sa[j] % Anchor_dist;
        anchor  = Sa[j] / Anchor_dist;
        aoffset = Anchor_offset[anchor]; // dist of sorted suf from anchor
        if(toffset < aoffset) {
            Anchor_offset[anchor] = toffset;
            Anchor_rank[anchor] = j;
        }
        // see if we have found the rank of pos; if so, store it in rank
        if(Sa[j] == pos) {
            assert(rank == -1);
            rank = j;
        }
    }
    assert(rank >= 0);
    return rank;
}

void dfprintf_fileLine(FILE* fptr, const char *func, const char *file,
                       int line_number, const char *format, ...)
{
    fflush(fptr);
    va_list args;
    va_start(args, format);
    fprintf(fptr, "(%d) DEBUG %s(), %s:%d: ",
            get_rank(),
            func,
            file, // my_basename(file),
            line_number);
    /* print out remainder of message */
    vfprintf(fptr, format, args);
    va_end(args);
    fprintf(fptr, "\n");
    fflush(fptr);
}

bool CAI_Stalker::can_take(CInventoryItem const * item)
{
    const CWeapon *new_weapon = smart_cast<const CWeapon*>(item);
    if (!new_weapon)
        return (false);

    bool new_weapon_enough_ammo = enough_ammo(new_weapon);
    u32 new_weapon_rank = get_rank(new_weapon->cNameSect());

    TIItemContainer::iterator I = inventory().m_all.begin();
    TIItemContainer::iterator E = inventory().m_all.end();
    for ( ; I != E; ++I)
        if (conflicted(*I, new_weapon, new_weapon_enough_ammo, new_weapon_rank))
            return (false);

    return (true);
}

/**
 * \brief NCCL implementation of \ref gpucomm_reduce.
 */
static int reduce(gpudata *src, size_t offsrc, gpudata *dest, size_t offdest,
                  size_t count, int typecode, int opcode, int root,
                  gpucomm *comm) {
  // need dummy init so that compiler shuts up
  ncclRedOp_t op = ncclNumOps;
  ncclDataType_t datatype = ncclNumTypes;
  gpudata *dst = NULL;
  int rank = 0;
  cuda_context *ctx;

  ASSERT_BUF(src);
  ASSERT_COMM(comm);
  GA_CHECK(get_rank(comm, &rank));
  if (rank == root) {
    dst = dest;
    ASSERT_BUF(dest);
  }
  GA_CHECK(check_restrictions(src, offsrc, dst, offdest, count, typecode,
                              opcode, comm, &datatype, &op));

  ctx = comm->ctx;
  cuda_enter(ctx);

  // sync: wait till a write has finished (out of concurrent kernels)
  GA_CUDA_EXIT_ON_ERROR(ctx, cuda_wait(src, CUDA_WAIT_READ));
  // sync: wait till a read/write has finished (out of concurrent kernels)
  if (rank == root)
    GA_CUDA_EXIT_ON_ERROR(ctx, cuda_wait(dest, CUDA_WAIT_WRITE));

  // change stream of nccl ops to enable concurrency
  if (rank == root)
    NCCL_EXIT_ON_ERROR(ctx, ncclReduce((void *)(src->ptr + offsrc),
                                       (void *)(dest->ptr + offdest), count,
                                       datatype, op, root, comm->c, ctx->s));
  else
    NCCL_EXIT_ON_ERROR(ctx, ncclReduce((void *)(src->ptr + offsrc), NULL,
                                       count, datatype, op, root, comm->c,
                                       ctx->s));

  GA_CUDA_EXIT_ON_ERROR(ctx, cuda_record(src, CUDA_WAIT_READ));
  if (rank == root)
    GA_CUDA_EXIT_ON_ERROR(ctx, cuda_record(dest, CUDA_WAIT_WRITE));

  cuda_exit(ctx);
  return GA_NO_ERROR;
}

/*
 * utility function to get the dimensionality (rank) of a dataset
 */
int get_rank_by_name(hid_t group_id, char *name) {
    if (!checkfordataset(group_id, name)) {
        return 0;
    }

    herr_t HDF5_error = -1;
#if H5_VERSION_GE(1,8,0)
    hid_t dataset_id = H5Dopen2(group_id, name, H5P_DEFAULT);
#else
    hid_t dataset_id = H5Dopen(group_id, name);
#endif
    if (dataset_id == HDF5_error) {
        printf("ERROR opening %s data set \n", name);
        return 0;
    }

    hid_t dataspace_id = H5Dget_space(dataset_id);
    int rank = get_rank(dataspace_id);
    H5Sclose(dataspace_id); // release the dataspace handle
    H5Dclose(dataset_id);
    return rank;
}

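/*
 * The get_rank(dataspace_id) helper is not shown in this excerpt. In the HDF5
 * API the rank of a simple dataspace is queried with H5Sget_simple_extent_ndims,
 * so a minimal implementation would presumably be:
 */
int get_rank(hid_t dataspace_id) {
    // returns the number of dimensions, or a negative value on failure
    return H5Sget_simple_extent_ndims(dataspace_id);
}
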
Floats ReplicaExchange::get_friend_parameter(std::string key, int findex)
{
    int frank = get_rank(findex);
    int nparam = parameters_[key].size();

    double* myparameters = new double[nparam];
    std::copy(parameters_[key].begin(), parameters_[key].end(), myparameters);
    double* fparameters = new double[nparam];

    MPI_Sendrecv(myparameters, nparam, MPI_DOUBLE, frank, myrank_,
                 fparameters, nparam, MPI_DOUBLE, frank, frank,
                 MPI_COMM_WORLD, &status_);

    Floats fpar(fparameters, fparameters + nparam);
    delete [] myparameters;
    delete [] fparameters;
    return fpar;
}

void player_status()
{
    unsigned int level, r;
    const char *rank;

    level = player.xp >> 4;
    r = level >> 2;
    if(r > 7)
        r = 7;
    rank = get_rank(player.role, r);

    stat_printf("%s the %s\nFloor:%u HP:%u(%u) Lvl:%u St:%u Dx:%u Co:%u In:%u Wi:%u Ch:%u T:%u",
                player.name, rank, player.level->floor,
                player.stats.hp, player.stats.hpmax, level,
                player.stats.st, player.stats.dx, player.stats.co,
                player.stats.in, player.stats.wi, player.stats.ch,
                player.turn);
}