Example No. 1
static struct gpu_perf_comm *
lookup_comm(struct gpu_perf *gp, pid_t pid)
{
	struct gpu_perf_comm *comm;

	if (pid == 0)
		return NULL;

	for (comm = gp->comm; comm != NULL; comm = comm->next) {
		if (comm->pid == pid)
			break;
	}
	if (comm == NULL) {
		comm = calloc(1, sizeof(*comm));
		if (comm == NULL)
			return NULL;

		if (get_comm(pid, comm->name, sizeof(comm->name)) < 0) {
			free(comm);
			return NULL;
		}

		comm->pid = pid;
		comm->next = gp->comm;
		gp->comm = comm;
	}

	return comm;
}
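The helper get_comm(pid, name, len) that fills the cached entry is not shown in this example. A plausible sketch, assuming the name comes from procfs (hypothetical, not the project's actual implementation):

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

/* Hypothetical helper: read a process name from /proc/<pid>/comm.
 * Returns 0 on success, -1 on failure, matching the caller's check. */
static int get_comm(pid_t pid, char *name, int len)
{
	char path[64];
	FILE *file;

	snprintf(path, sizeof(path), "/proc/%d/comm", (int)pid);
	file = fopen(path, "r");
	if (file == NULL)
		return -1;

	if (fgets(name, len, file) == NULL) {
		fclose(file);
		return -1;
	}
	name[strcspn(name, "\n")] = '\0'; /* strip the trailing newline */

	fclose(file);
	return 0;
}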
static int test_str(struct event_format *event,
		    struct filter_arg *arg, struct pevent_record *record)
{
	const char *val;

	if (arg->str.field == &comm)
		val = get_comm(event, record);
	else
		val = get_field_str(arg, record);

	switch (arg->str.type) {
	case FILTER_CMP_MATCH:
		return strcmp(val, arg->str.val) == 0;

	case FILTER_CMP_NOT_MATCH:
		return strcmp(val, arg->str.val) != 0;

	case FILTER_CMP_REGEX:
		/* Returns zero on match */
		return !regexec(&arg->str.reg, val, 0, NULL, 0);

	case FILTER_CMP_NOT_REGEX:
		return regexec(&arg->str.reg, val, 0, NULL, 0);

	default:
		/* ?? */
		return 0;
	}
}
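The two regex branches rely on arg->str.reg having been compiled with regcomp() when the filter string was parsed, and on regexec() returning zero on a match (hence the negation in the match case). A minimal standalone sketch of that pattern with POSIX <regex.h>:

#include <regex.h>
#include <stdio.h>

int main(void)
{
	regex_t reg;

	/* Compile once, at filter-parse time... */
	if (regcomp(&reg, "^kworker/", REG_EXTENDED | REG_NOSUB) != 0)
		return 1;

	/* ...then test each record cheaply: regexec() returns 0 on match. */
	printf("%d\n", !regexec(&reg, "kworker/0:1", 0, NULL, 0)); /* prints 1 */
	printf("%d\n", !regexec(&reg, "bash", 0, NULL, 0));        /* prints 0 */

	regfree(&reg);
	return 0;
}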
static unsigned long long
get_value(struct event_format *event,
	  struct format_field *field, struct pevent_record *record)
{
	unsigned long long val;

	/* Handle our dummy "comm" field */
	if (field == &comm) {
		const char *name;

		name = get_comm(event, record);
		return (unsigned long)name;
	}

	pevent_read_number_field(field, record->data, &val);

	if (!(field->flags & FIELD_IS_SIGNED))
		return val;

	switch (field->size) {
	case 1:
		return (char)val;
	case 2:
		return (short)val;
	case 4:
		return (int)val;
	case 8:
		return (long long)val;
	}
	return val;
}
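The final switch matters because pevent_read_number_field() yields the raw field bits zero-extended into an unsigned long long; a signed field must be reinterpreted at its own width to survive the widening. A tiny standalone illustration (plain C, assuming char is signed as the cast above does):

#include <stdio.h>

int main(void)
{
	/* A 1-byte signed field holding -1 arrives as the raw byte 0xff;
	 * zero-extension alone would report it as 255. */
	unsigned long long val = 0xff;

	printf("%lld\n", (long long)val);       /* 255: sign lost */
	printf("%lld\n", (long long)(char)val); /* -1: width-aware cast */
	return 0;
}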
Example No. 4
void run_test_on_num_procs_or_less(int numProcs, stk::mesh::BulkData::AutomaticAuraOption auraOption)
{
    if(stk::parallel_machine_size(get_comm()) <= numProcs)
    {
        run_test(auraOption);
    }
}
Example No. 5
void mpi_comm_create_(int* comm, int* group, int* newcomm, int* ierr) {
  MPI_Comm tmp;

  *ierr = MPI_Comm_create(get_comm(*comm), get_group(*group), &tmp);
  if(*ierr == MPI_SUCCESS) {
    *newcomm = new_comm(tmp);
  }
}
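All of these Fortran wrappers funnel through get_comm() and new_comm(), which map the integer handles Fortran passes by reference onto C-side MPI objects. Those helpers are not shown here; a minimal table-based sketch of their likely shape (hypothetical, not the library's actual code):

#include <mpi.h>

#define MAX_COMMS 128

/* Hypothetical handle table: Fortran works with small integers,
 * the C side resolves them to MPI_Comm objects. Slot 0 would be
 * registered as MPI_COMM_WORLD during initialization. */
static MPI_Comm comm_table[MAX_COMMS];
static int comm_count = 1;

static MPI_Comm get_comm(int handle)
{
  if (handle < 0 || handle >= comm_count)
    return MPI_COMM_NULL;
  return comm_table[handle];
}

static int new_comm(MPI_Comm comm)
{
  if (comm_count >= MAX_COMMS)
    return -1;
  comm_table[comm_count] = comm;
  return comm_count++;
}

The same scheme would cover get_group(), get_datatype(), get_op() and new_request() in the examples below; production bindings more commonly lean on the standard MPI_Comm_f2c()/MPI_Comm_c2f() conversions instead of a hand-rolled table.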
Example No. 6
void mpi_comm_dup_(int* comm, int* newcomm, int* ierr) {
  MPI_Comm tmp;

  *ierr = MPI_Comm_dup(get_comm(*comm), &tmp);
  if(*ierr == MPI_SUCCESS) {
    *newcomm = new_comm(tmp);
  }
}
Example No. 7
void mpi_comm_group_(int* comm, int* group_out,  int* ierr) {
  MPI_Group tmp;

  *ierr = MPI_Comm_group(get_comm(*comm), &tmp);
  if(*ierr == MPI_SUCCESS) {
    *group_out = new_group(tmp);
  }
}
Example No. 8
void mpi_sendrecv_(void* sendbuf, int* sendcount, int* sendtype, int* dst,
                int* sendtag, void *recvbuf, int* recvcount,
                int* recvtype, int* src, int* recvtag,
                int* comm, MPI_Status* status, int* ierr) {
   *ierr = MPI_Sendrecv(sendbuf, *sendcount, get_datatype(*sendtype), *dst,
       *sendtag, recvbuf, *recvcount, get_datatype(*recvtype), *src, *recvtag,
       get_comm(*comm), status);
}
Example No. 9
void mpi_comm_split_(int* comm, int* color, int* key, int* comm_out, int* ierr) {
  MPI_Comm tmp;

  *ierr = MPI_Comm_split(get_comm(*comm), *color, *key, &tmp);
  if(*ierr == MPI_SUCCESS) {
    *comm_out = new_comm(tmp);
  }
}
Example No. 10
void mpi_comm_free_(int* comm, int* ierr) {
  MPI_Comm tmp = get_comm(*comm);

  *ierr = MPI_Comm_free(&tmp);

  if(*ierr == MPI_SUCCESS) {
    free_comm(*comm);
  }
}
Example No. 11
void mpi_irecv_(void *buf, int* count, int* datatype, int* src, int* tag,
                 int* comm, int* request, int* ierr) {
  MPI_Request req;

  *ierr = MPI_Irecv(buf, *count, get_datatype(*datatype), *src, *tag,
                    get_comm(*comm), &req);
  if(*ierr == MPI_SUCCESS) {
    *request = new_request(req);
  }
}
Example No. 12
void mpi_send_init_(void *buf, int* count, int* datatype, int* dst, int* tag,
                     int* comm, int* request, int* ierr) {
  MPI_Request req;

  *ierr = MPI_Send_init(buf, *count, get_datatype(*datatype), *dst, *tag,
                        get_comm(*comm), &req);
  if(*ierr == MPI_SUCCESS) {
    *request = new_request(req);
  }
}
Example No. 13
double logistic_regression::calc_loss() {
    double loss = 0.;
    for(size_t i = 0; i < samples.size(); ++i) {
        double j = lg_hypothesis(samples[i]);
        loss += j * j;
    }
    auto worker_comm = get_comm();
    worker_comm.allreduce(loss);
    int sz = samples.size();
    worker_comm.allreduce(sz);
    return loss / sz;
}
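Both allreduce calls are sums across all workers, so calc_loss() returns the global mean of the squared hypothesis values, not a per-worker mean. Stripped of the communicator wrapper, the same pattern in raw MPI would look like this sketch (assuming MPI_COMM_WORLD):

#include <mpi.h>

/* Global mean: sum the local loss and local sample count over all
 * ranks, then divide so every rank gets the same result. */
static double global_mean_loss(double local_loss, int local_n)
{
  double loss = local_loss;
  int n = local_n;

  MPI_Allreduce(MPI_IN_PLACE, &loss, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  MPI_Allreduce(MPI_IN_PLACE, &n, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
  return loss / n;
}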
Example No. 14
  void solve() {
    init();
    // superstep 0
    for(auto & kv : vertex_val_map) {
      paracel_bupdate(kv.first, 
                      kv.second, 
                      "/mfs/user/wuhong/paracel/local/lib/libmvv_update.so", 
                      "max_updater");
    }
    sync();
    // following supersteps
    while(1) {
      int local_halt_flag = 1;
      // scan all vertices, propagating values from the active ones
      for(auto & kv : vertex_active_map) {
        std::string v = kv.first;
        // if vertex is active
        if(kv.second) {
          local_halt_flag = 0;
          // iter outgoing edges
          for(auto & edge_info : vertex_adj_edge_val_map[v]) {
            std::string link_v = edge_info.first;
            paracel_bupdate(link_v, 
                            vertex_val_map[v], 
                            "/mfs/user/wuhong/paracel/local/lib/libmvv_update.so",
                            "max_updater");
          }
        }
      }
      sync();

      // update vertex_active_map
      for(auto & kv : vertex_active_map) {
        std::string vertex = kv.first;
        double new_val = paracel_read<double>(vertex);
        double old_val = vertex_val_map[vertex];
        vertex_val_map[vertex] = new_val; // local update
        if(new_val == old_val) {
          vertex_active_map[vertex] = false; // vote to halt
        } else {
          vertex_active_map[vertex] = true; // reactivate
        }
      }
      sync();
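      // allreduce sums the per-worker flags; the sum equals the worker
      // count only when every worker found no active vertex this round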
      get_comm().allreduce(local_halt_flag);
      if(local_halt_flag == get_worker_size()) {
        break;
      }
      sync();
    }
    sync();
  } // solve
Example No. 15
static void comm_init(struct thread_map *map, int i)
{
	pid_t pid = thread_map__pid(map, i);
	char *comm = NULL;

	/* dummy pid comm initialization */
	if (pid == -1) {
		map->map[i].comm = strdup("dummy");
		return;
	}

	/*
	 * The comm name is like extra bonus ;-),
	 * so just warn if we fail for any reason.
	 */
	if (get_comm(&comm, pid))
		pr_warning("Couldn't resolve comm name for pid %d\n", pid);

	map->map[i].comm = comm;
}
Example No. 16
void mpi_alltoallv_(void* sendbuf, int* sendcounts, int* senddisps, int* sendtype,
                    void* recvbuf, int* recvcounts, int* recvdisps, int* recvtype, int* comm, int* ierr) {
  *ierr = MPI_Alltoallv(sendbuf, sendcounts, senddisps, get_datatype(*sendtype),
                       recvbuf, recvcounts, recvdisps, get_datatype(*recvtype), get_comm(*comm));
}
Example No. 17
void mpi_alltoall_(void* sendbuf, int* sendcount, int* sendtype,
                    void* recvbuf, int* recvcount, int* recvtype, int* comm, int* ierr) {
  *ierr = MPI_Alltoall(sendbuf, *sendcount, get_datatype(*sendtype),
                       recvbuf, *recvcount, get_datatype(*recvtype), get_comm(*comm));
}
Example No. 18
void mpi_scan_(void* sendbuf, void* recvbuf, int* count, int* datatype,
                int* op, int* comm, int* ierr) {
  *ierr = MPI_Scan(sendbuf, recvbuf, *count, get_datatype(*datatype),
                   get_op(*op), get_comm(*comm));
}
Example No. 19
void mpi_allgatherv_(void* sendbuf, int* sendcount, int* sendtype,
                     void* recvbuf, int* recvcounts, int* displs, int* recvtype,
                     int* comm, int* ierr) {
  *ierr = MPI_Allgatherv(sendbuf, *sendcount, get_datatype(*sendtype),
                        recvbuf, recvcounts, displs, get_datatype(*recvtype), get_comm(*comm));
}
Example No. 20
void mpi_gather_(void* sendbuf, int* sendcount, int* sendtype,
                  void* recvbuf, int* recvcount, int* recvtype,
                  int* root, int* comm, int* ierr) {
  *ierr = MPI_Gather(sendbuf, *sendcount, get_datatype(*sendtype),
                     recvbuf, *recvcount, get_datatype(*recvtype), *root, get_comm(*comm));
}
Example No. 21
void mpi_scatterv_(void* sendbuf, int* sendcounts, int* displs, int* sendtype,
                   void* recvbuf, int* recvcount, int* recvtype,
                   int* root, int* comm, int* ierr) {
  *ierr = MPI_Scatterv(sendbuf, sendcounts, displs, get_datatype(*sendtype),
                      recvbuf, *recvcount, get_datatype(*recvtype), *root, get_comm(*comm));
}
Example No. 22
void mpi_allreduce_(void* sendbuf, void* recvbuf, int* count, int* datatype,
                     int* op, int* comm, int* ierr) {
  *ierr = MPI_Allreduce(sendbuf, recvbuf, *count, get_datatype(*datatype),
                        get_op(*op), get_comm(*comm));
}
Example No. 23
void mpi_bcast_(void *buf, int* count, int* datatype, int* root, int* comm, int* ierr) {
  *ierr = MPI_Bcast(buf, *count, get_datatype(*datatype), *root, get_comm(*comm));
}
Example No. 24
void mpi_barrier_(int* comm, int* ierr) {
  *ierr = MPI_Barrier(get_comm(*comm));
}
Example No. 25
void generate_1000_ids_on_all_procs()
{
    sideIdPool->generate_initial_ids(numInitialIdsToRequestPerProc);
    const stk::mesh::EntityIdVector &idsThisProc = sideIdPool->my_get_all_ids();
    stk::parallel_vector_concat(get_comm(), idsThisProc, ids);
}
Example No. 26
void mpi_attr_get_(int* comm, int* keyval, void* attr_value, int* flag, int* ierr) {
  *ierr = MPI_Attr_get(get_comm(*comm), *keyval, attr_value, flag);
}
Example No. 27
void mpi_win_create_(int* base, MPI_Aint* size, int* disp_unit, int* info, int* comm, int* win, int* ierr) {
  *ierr = MPI_Win_create((void*)base, *size, *disp_unit, *(MPI_Info*)info, get_comm(*comm), (MPI_Win*)win);
}
Example No. 28
void mpi_reduce_scatter_(void* sendbuf, void* recvbuf, int* recvcounts, int* datatype,
                     int* op, int* comm, int* ierr) {
  *ierr = MPI_Reduce_scatter(sendbuf, recvbuf, recvcounts, get_datatype(*datatype),
                        get_op(*op), get_comm(*comm));
}
Example No. 29
void generate_5_additional_ids_on_all_procs()
{
    sideIdPool->generate_additional_ids_collective(numAdditionalIdsToRequestPerProc);
    const stk::mesh::EntityIdVector &idsThisProc = sideIdPool->my_get_all_ids();
    stk::parallel_vector_concat(get_comm(), idsThisProc, ids);
}
Example No. 30
void mpi_recv_(void* buf, int* count, int* datatype, int* src,
                int* tag, int* comm, MPI_Status* status, int* ierr) {
   *ierr = MPI_Recv(buf, *count, get_datatype(*datatype), *src, *tag,
                    get_comm(*comm), status);
}