Example #1
/*
 * Clusters data.
 */
int main(int argc, char **argv)
{
	((void)argc); /* Unused parameter. */
	
	/* Retrieve this process' rank from the arguments. */
	rank = atoi(argv[0]);
	
	/* Setup interprocess communication. */
	open_noc_connectors();
	sync_master();
	
	/* Get the work to be clustered. */
	getwork();
	
	/* Cluster the data. */
	kmeans();
	
	/* Send the total back before shutting down. */
	data_send(outfd, &total, sizeof(uint64_t));
	close_noc_connectors();
	mppa_exit(0);
	return (0);
}
Example #2
Status
syncRepo(const std::string &syncDir, const std::string &syncKey, bool &dirty)
{
    AutoSyncLock lock(gSyncMutex);

    AutoFree<git_repository, git_repository_free> repo;
    ABC_CHECK_GIT(git_repository_open(&repo.get(), syncDir.c_str()));

    std::string url;

    ABC_CHECK(syncUrl(url, syncKey));

    // Try the configured sync servers until a fetch succeeds.
    for (size_t i = 0; i < syncServers.size(); i++)
    {
        ABC_CHECK(syncUrl(url, syncKey, true));
        if (sync_fetch(repo, url.c_str()) >= 0)
        {
            ABC_DebugLog("Syncing to: %s", url.c_str());
            break;
        }
        else
        {
            ABC_DebugLog("FAIlED Syncing to: %s", url.c_str());
        }
    }

    // Determine which files changed and whether a push is needed.
    int files_changed, need_push;
    ABC_CHECK_GIT(sync_master(repo, &files_changed, &need_push));

    if (need_push)
        ABC_CHECK_GIT(sync_push(repo, url.c_str()));

    // If this fails, the app has been shut down, leaving us for dead.
    // We will crash anyhow, but this at least makes it official:
    assert(gContext);

    dirty = !!files_changed;
    return Status();
}
Example #3
void vt_sync(MPI_Comm comm, uint64_t* ltime, int64_t* offset)
{
  VT_MPI_INT myrank, myrank_host, myrank_sync;
  VT_MPI_INT numnodes;
  uint64_t time;

  MPI_Comm host_comm;
  MPI_Comm sync_comm;

  VT_SUSPEND_IO_TRACING(VT_CURRENT_THREAD);

  /* mark the beginning of clock synchronization */
  time = vt_pform_wtime();
  vt_enter(VT_CURRENT_THREAD, &time, vt_trc_regid[VT__TRC_SYNCTIME]);

  /* barrier at entry */
  PMPI_Barrier(comm);

  *offset = 0;
  *ltime = vt_pform_wtime();

  PMPI_Comm_rank(comm, &myrank);

  /* create communicator containing all processes on the same node */

  PMPI_Comm_split(comm, (vt_pform_node_id() & 0x7FFFFFFF), 0, &host_comm);
  PMPI_Comm_rank(host_comm, &myrank_host);

  /* create communicator containing all processes with rank zero in the
     previously created communicators */
  
  PMPI_Comm_split(comm, myrank_host, 0, &sync_comm);
  PMPI_Comm_rank(sync_comm, &myrank_sync);
  PMPI_Comm_size(sync_comm, &numnodes);

  /* measure offsets between all nodes and the root node (rank 0 in sync_comm) */

  if (myrank_host == 0)
  {
    VT_MPI_INT i;

    for (i = 1; i < numnodes; i++)
    {
      PMPI_Barrier(sync_comm);
      if (myrank_sync == i)
        *offset = sync_slave(ltime, 0, sync_comm);
      else if (myrank_sync == 0)
        *offset = sync_master(ltime, i, sync_comm);
    }
  }

  /* distribute offset and ltime across all processes on the same node */

  PMPI_Bcast(offset, 1, MPI_LONG_LONG_INT, 0, host_comm);
  PMPI_Bcast(ltime, 1, MPI_LONG_LONG_INT, 0, host_comm);

  PMPI_Comm_free(&host_comm);
  PMPI_Comm_free(&sync_comm);

  /* barrier at exit */
  PMPI_Barrier(comm);

  /* mark the end of clock synchronization */
  time = vt_pform_wtime();
  vt_exit(VT_CURRENT_THREAD, &time);

  VT_RESUME_IO_TRACING(VT_CURRENT_THREAD);
}