Example no. 1
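/* Open a file for non-blocking MPI output: determine the write-buffer size
 * once per node (broadcast from bcastRoot), verify the filename has not
 * been added before, create the per-file buffer object and register it in
 * the set bibAFiledataM; the set index is returned as the file id. */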
int fowMPINONB ( const char *filename )
{
  static aFiledataM *of;
  static unsigned long buffersize = 0;
  int id;
  enum {
    bcastRoot = 0
  };
  MPI_Comm commNode = commInqCommNode ();
  int rankNode = commInqRankNode ();

  /* broadcast buffersize to collectors (just once, for all files) */

  if (!buffersize)
    {
      if (rankNode == bcastRoot)
        buffersize = findWriteAccumBufsize();
      xmpi(MPI_Bcast(&buffersize, 1, MPI_UNSIGNED_LONG, bcastRoot, commNode));
    }

  xdebug("buffersize=%ld", buffersize);

  listSetForeach(bibAFiledataM, elemCheck, (void *)filename);
  of = initAFiledataMPINONB(filename, (size_t)buffersize);

  if ((id = listSetAdd(bibAFiledataM, of)) < 0 )
    xabort("filename %s not unique", of->name);

  xdebug("IOPE%d: name=%s, init and added aFiledataM, return id = %d",
         rankNode, filename, id);
  of->fileID = id;
  return id;
}
Example no. 2
/*
 * intercept - This is where the highest-level logic of hotpatching
 * is described. Upon startup, this routine looks for libc and libpthread.
 * If these libraries are found in the process's address space, they are
 * patched.
 *
 * This is the init routine of syscall_intercept. This library constructor
 * must be in a TU which also contains public symbols; otherwise, linkers
 * might discard the whole object file containing it when linking
 * statically with libsyscall_intercept.
 */
static __attribute__((constructor)) void
intercept(int argc, char **argv)
{
	(void) argc;
	cmdline = argv[0];

	if (!syscall_hook_in_process_allowed())
		return;

	vdso_addr = (void *)(uintptr_t)getauxval(AT_SYSINFO_EHDR);
	debug_dumps_on = getenv("INTERCEPT_DEBUG_DUMP") != NULL;
	patch_all_objs = (getenv("INTERCEPT_ALL_OBJS") != NULL);
	intercept_setup_log(getenv("INTERCEPT_LOG"),
			getenv("INTERCEPT_LOG_TRUNC"));
	log_header();
	init_patcher();

	dl_iterate_phdr(analyze_object, NULL);
	if (!libc_found)
		xabort("libc not found");

	mprotect_asm_wrappers();
	for (unsigned i = 0; i < objs_count; ++i)
		activate_patches(objs + i);
}
Example no. 3
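/* Allocate and initialize the per-file data for non-blocking MPI output:
 * copy the filename, set up the two output buffers of size bs, open the
 * file on the node communicator for create/write-only access and reset
 * the outstanding-request state. */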
static aFiledataM *initAFiledataMPINONB ( const char *filename, size_t bs )
{
  aFiledataM *of = NULL;
  int iret;
  MPI_Comm commNode = commInqCommNode ();

  of = (aFiledataM*) xmalloc(sizeof (*of) + strlen(filename) + 1);

  strcpy(of->name, filename);
  of->size = bs;
  of->db1 = NULL;
  of->db2 = NULL;

  /* init output buffer */

  iret = dbuffer_init ( &( of->db1 ), of->size );
  iret += dbuffer_init ( &( of->db2 ), of->size );

  if ( iret > 0 ) xabort ( "dbuffer_init did not succeed" );

  of->db = of->db1;

  of->tsID = CDI_UNDEFID;

  /* open file */
  xmpi(MPI_File_open(commNode, of->name, MPI_MODE_CREATE|MPI_MODE_WRONLY,
                     MPI_INFO_NULL, &( of->fh )));
  of->request = MPI_REQUEST_NULL;
  of->finished = false;

  return of;
}
Example no. 4
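/* listSet callback: abort if an already registered file has the same name
 * as the one being added. */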
static void
elemCheck(void *q, void *nm)
{
  aFiledataM *afm = q;
  const char *name = nm;

  if (!strcmp(name, afm->name))
    xabort("Filename %s has already been added to set\n", name);
}
Example no. 5
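/* Destroy the set of open files; it is an error if any file is still open. */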
void finalizeMPINONB(void)
{
  if (!listSetIsEmpty(bibAFiledataM))
    xabort("set bibAFiledataM not empty");
  else
    {
      xdebug("%s", "destroy set");
      listSetDelete(bibAFiledataM);
    }
}
Example no. 6
/*
 * should_patch_object
 * Decides whether a particular loaded object should be targeted for
 * hotpatching.
 * Always skipped: [vdso] and the syscall_intercept library itself.
 * Besides these two, if patch_all_objs is true, every object is
 * a target. When patch_all_objs is false, only libraries that are part of
 * the glibc implementation are targeted, i.e.: libc and libpthread.
 */
static bool
should_patch_object(uintptr_t addr, const char *path)
{
	static uintptr_t self_addr;
	if (self_addr == 0) {
		extern unsigned char intercept_asm_wrapper_tmpl[];
		Dl_info self;
		if (!dladdr((void *)&intercept_asm_wrapper_tmpl, &self))
			xabort("self dladdr failure");
		self_addr = (uintptr_t)self.dli_fbase;
	}

	static const char libc[] = "libc";
	static const char pthr[] = "libpthread";
	static const char caps[] = "libcapstone";

	if (is_vdso(addr, path)) {
		debug_dump(" - skipping: is_vdso\n");
		return false;
	}

	const char *name = get_lib_short_name(path);
	size_t len = strcspn(name, "-.");

	if (len == 0)
		return false;

	if (addr == self_addr) {
		debug_dump(" - skipping: matches self\n");
		return false;
	}

	if (str_match(name, len, caps)) {
		debug_dump(" - skipping: matches capstone\n");
		return false;
	}

	if (str_match(name, len, libc)) {
		debug_dump(" - libc found\n");
		libc_found = true;
		return true;
	}

	if (patch_all_objs)
		return true;

	if (str_match(name, len, pthr)) {
		debug_dump(" - libpthread found\n");
		return true;
	}

	debug_dump(" - skipping, patch_all_objs == false\n");
	return false;
}
Example no. 7
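/* Set up the communicators used for collective I/O, run the caller's
 * post-setup hook and create the (initially empty) set of open files. */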
void
initMPINONB(void (*postCommSetupActions)(void))
{
  commDefCommColl ( 1 );
  commSendNodeInfo ();
  commRecvNodeMap ();
  commDefCommsIO ();
  postCommSetupActions();
  bibAFiledataM = listSetNew( destroyAFiledataMPINONB, compareNamesMPINONB );

  if ( bibAFiledataM == NULL )
    xabort ( "listSetNew did not succeed" );
}
Example no. 8
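/* Close the file identified by fileID: flush its write buffer and remove
 * its entry from the set bibAFiledataM. */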
int fcMPINONB ( int fileID )
{
  aFiledataM *of;
  int rankNode = commInqRankNode ();

  xdebug("IOPE%d: write buffer, close file and cleanup, in %d",
         rankNode, fileID );

  if (!(of = listSetGet(bibAFiledataM, fileIDTest, (void *)(intptr_t)fileID)))
    xabort("listSet, fileID=%d not found", fileID);

  writeMPINONB(of);

  /* remove file element */
  int iret = listSetRemove(bibAFiledataM, fileIDTest, (void *)(intptr_t)fileID);
  return iret;
}
Example no. 9
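/* Append len bytes for time step tsID to the file's output buffer.  When
 * the time step changes, the current buffer is handed to writeMPINONB,
 * the previous non-blocking write is completed and the node synchronizes.
 * If the data does not fit into the buffer, it is flushed and the push
 * retried once; failing that, the routine aborts. */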
size_t fwMPINONB ( int fileID, int tsID, const void *buffer, size_t len )
{
  int error = 0;
  int filled = 0;
  aFiledataM *of;
  int rankNode = commInqRankNode ();

  of = listSetGet(bibAFiledataM, fileIDTest, (void *)(intptr_t)fileID);
  xassert(of);

  bool flush = tsID != of->tsID;

  if (flush)
    {
      xdebug3("IOPE%d: tsID = %d, flush buffer", rankNode, tsID);
      writeMPINONB(of);
      of->tsID = tsID;
      MPI_Status status;
      xmpi(MPI_Wait(&(of->request), &status));
      xmpi(MPI_Barrier(commInqCommNode()));
    }

  filled = dbuffer_push ( of->db, ( unsigned char * ) buffer, len );

  xdebug3 ( "IOPE%d: fileID = %d, tsID = %d,"
           " pushed data on buffer, filled = %d",
           rankNode, fileID, tsID, filled );

  if ( filled == 1 )
    {
      if ( flush )
        error = filled;
      else
        {
          writeMPINONB(of);

          error = dbuffer_push ( of->db, ( unsigned char * ) buffer, len );
        }
    }

  if ( error == 1 )
    xabort("did not succeed filling output buffer, fileID=%d", fileID);

  return len;
}
Example no. 10
/*
 * write out a single bucket
 */
static int
flushiebuck(IEBucks *ib, int b, int reset)
{
	uint32_t n;

	if(ib->bucks[b].used == 0)
		return 0;
	n = ib->bucks[b].used;
	U32PUT(&ib->bucks[b].buf[n], ib->bucks[b].head);
	n += U32Size;
	USED(n);
	if(writepart(ib->part, (uint64_t)ib->chunks * ib->size, ib->bucks[b].buf, ib->size) < 0){
		seterr(EOk, "can't write sorting bucket to file: %r");
		xabort();
		return -1;
	}
	ib->bucks[b].head = ib->chunks++;
	ib->bucks[b].total += ib->bucks[b].used;
	if(reset)
		ib->bucks[b].used = 0;
	return 0;
}
Example no. 11
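/* Collector-side processing of the data received in the RMA windows:
 * execute the RPC entries found in the root window dictionary, then, for
 * every stream that received data, gather the variables and write them --
 * GRIB streams via writeGribStream, netCDF streams either in parallel
 * (HAVE_PARALLEL_NC4, via a yaxt redistribution onto per-collector chunks)
 * or serially on the collector rank that has the stream open. */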
static void readGetBuffers(void)
{
  int nProcsModel = commInqNProcsModel ();
  int root        = commInqRootGlob ();
#ifdef HAVE_NETCDF4
  int myCollRank = commInqRankColl();
  MPI_Comm collComm = commInqCommColl();
#endif
  xdebug("%s", "START");

  struct winHeaderEntry *winDict
    = (struct winHeaderEntry *)rxWin[root].buffer;
  xassert(winDict[0].id == HEADERSIZEMARKER);
  {
    int dictSize = rxWin[root].dictSize,
      firstNonRPCEntry = dictSize - winDict[0].specific.headerSize.numRPCEntries - 1,
      headerIdx,
      numFuncCalls = 0;
    for (headerIdx = dictSize - 1;
         headerIdx > firstNonRPCEntry;
         --headerIdx)
      {
        xassert(winDict[headerIdx].id >= MINFUNCID
                && winDict[headerIdx].id <= MAXFUNCID);
        ++numFuncCalls;
        readFuncCall(winDict + headerIdx);
      }
    xassert(numFuncCalls == winDict[0].specific.headerSize.numRPCEntries);
  }
  /* build list of streams for which data was transferred */
  {
    struct streamMap map = buildStreamMap(winDict);
    double *data = NULL;
#ifdef HAVE_NETCDF4
    int *varIsWritten = NULL;
#endif
#if defined (HAVE_PARALLEL_NC4)
    double *writeBuf = NULL;
#endif
    int currentDataBufSize = 0;
    for (int streamIdx = 0; streamIdx < map.numEntries; ++streamIdx)
      {
        int streamID = map.entries[streamIdx].streamID;
        int vlistID = streamInqVlist(streamID);
        int filetype = map.entries[streamIdx].filetype;

        switch (filetype)
          {
          case FILETYPE_GRB:
          case FILETYPE_GRB2:
            writeGribStream(winDict, map.entries + streamIdx,
                            &data, &currentDataBufSize,
                            root, nProcsModel);
            break;
#ifdef HAVE_NETCDF4
          case FILETYPE_NC:
          case FILETYPE_NC2:
          case FILETYPE_NC4:
#ifdef HAVE_PARALLEL_NC4
            /* HAVE_PARALLEL_NC4 implies having ScalES-PPM and yaxt */
            {
              int nvars = map.entries[streamIdx].numVars;
              int *varMap = map.entries[streamIdx].varMap;
              buildWrittenVars(map.entries + streamIdx, &varIsWritten,
                               myCollRank, collComm);
              for (int varID = 0; varID < nvars; ++varID)
                if (varIsWritten[varID])
                  {
                    struct PPM_extent varShape[3];
                    queryVarBounds(varShape, vlistID, varID);
                    struct xyzDims collGrid = varDimsCollGridMatch(varShape);
                    xdebug("writing varID %d with dimensions: "
                           "x=%d, y=%d, z=%d,\n"
                           "found distribution with dimensions:"
                           " x=%d, y=%d, z=%d.", varID,
                           varShape[0].size, varShape[1].size, varShape[2].size,
                           collGrid.sizes[0], collGrid.sizes[1],
                           collGrid.sizes[2]);
                    struct PPM_extent varChunk[3];
                    myVarPart(varShape, collGrid, varChunk);
                    int myChunk[3][2];
                    for (int i = 0; i < 3; ++i)
                      {
                        myChunk[i][0] = PPM_extent_start(varChunk[i]);
                        myChunk[i][1] = PPM_extent_end(varChunk[i]);
                      }
                    xdebug("Writing chunk { { %d, %d }, { %d, %d },"
                           " { %d, %d } }", myChunk[0][0], myChunk[0][1],
                           myChunk[1][0], myChunk[1][1], myChunk[2][0],
                           myChunk[2][1]);
                    Xt_int varSize[3];
                    for (int i = 0; i < 3; ++i)
                      varSize[2 - i] = varShape[i].size;
                    Xt_idxlist preRedistChunk, preWriteChunk;
                    /* prepare yaxt descriptor for current data
                       distribution after collect */
                    int nmiss;
                    if (varMap[varID] == -1)
                      {
                        preRedistChunk = xt_idxempty_new();
                        xdebug("%s", "I got none\n");
                      }
                    else
                      {
                        Xt_int preRedistStart[3] = { 0, 0, 0 };
                        preRedistChunk
                          = xt_idxsection_new(0, 3, varSize, varSize,
                                              preRedistStart);
                        resizeVarGatherBuf(vlistID, varID, &data,
                                           &currentDataBufSize);
                        int headerIdx = varMap[varID];
                        gatherArray(root, nProcsModel, headerIdx,
                                    vlistID, data, &nmiss);
                        xdebug("%s", "I got all\n");
                      }
                    MPI_Bcast(&nmiss, 1, MPI_INT, varIsWritten[varID] - 1,
                              collComm);
                    /* prepare yaxt descriptor for write chunk */
                    {
                      Xt_int preWriteChunkStart[3], preWriteChunkSize[3];
                      for (int i = 0; i < 3; ++i)
                        {
                          preWriteChunkStart[2 - i] = varChunk[i].first;
                          preWriteChunkSize[2 - i] = varChunk[i].size;
                        }
                      preWriteChunk = xt_idxsection_new(0, 3, varSize,
                                                        preWriteChunkSize,
                                                        preWriteChunkStart);
                    }
                    /* prepare redistribution */
                    {
                      Xt_xmap xmap = xt_xmap_all2all_new(preRedistChunk,
                                                         preWriteChunk,
                                                         collComm);
                      Xt_redist redist = xt_redist_p2p_new(xmap, MPI_DOUBLE);
                      xt_idxlist_delete(preRedistChunk);
                      xt_idxlist_delete(preWriteChunk);
                      xt_xmap_delete(xmap);
                      writeBuf = (double*) xrealloc(writeBuf,
                                                    sizeof (double)
                                                    * PPM_extents_size(3, varChunk));
                      xt_redist_s_exchange1(redist, data, writeBuf);
                      xt_redist_delete(redist);
                    }
                    /* write chunk */
                    streamWriteVarChunk(streamID, varID,
                                        (const int (*)[2])myChunk, writeBuf,
                                        nmiss);
                  }
            }
#else
            /* determine process which has stream open (writer) and
             * which has data for which variable (var owner)
             * three cases need to be distinguished */
            {
              int nvars = map.entries[streamIdx].numVars;
              int *varMap = map.entries[streamIdx].varMap;
              buildWrittenVars(map.entries + streamIdx, &varIsWritten,
                               myCollRank, collComm);
              int writerRank;
              if ((writerRank = cdiPioSerialOpenFileMap(streamID))
                  == myCollRank)
                {
                  for (int varID = 0; varID < nvars; ++varID)
                    if (varIsWritten[varID])
                      {
                        int nmiss;
                        int size = vlistInqVarSize(vlistID, varID);
                        resizeVarGatherBuf(vlistID, varID, &data,
                                           &currentDataBufSize);
                        int headerIdx = varMap[varID];
                        if (varIsWritten[varID] == myCollRank + 1)
                          {
                            /* this process has the full array and will
                             * write it */
                            xdebug("gathering varID=%d for direct writing",
                                   varID);
                            gatherArray(root, nProcsModel, headerIdx,
                                        vlistID, data, &nmiss);
                          }
                        else
                          {
                            /* another process has the array and will
                             * send it over */
                            MPI_Status stat;
                            xdebug("receiving varID=%d for writing from"
                                   " process %d",
                                   varID, varIsWritten[varID] - 1);
                            xmpiStat(MPI_Recv(&nmiss, 1, MPI_INT,
                                              varIsWritten[varID] - 1,
                                              COLLBUFNMISS,
                                              collComm, &stat), &stat);
                            xmpiStat(MPI_Recv(data, size, MPI_DOUBLE,
                                              varIsWritten[varID] - 1,
                                              COLLBUFTX,
                                              collComm, &stat), &stat);
                          }
                        streamWriteVar(streamID, varID, data, nmiss);
                      }
                }
              else
                for (int varID = 0; varID < nvars; ++varID)
                  if (varIsWritten[varID] == myCollRank + 1)
                    {
                      /* this process has the full array and another
                       * will write it */
                      int nmiss;
                      int size = vlistInqVarSize(vlistID, varID);
                      resizeVarGatherBuf(vlistID, varID, &data,
                                         &currentDataBufSize);
                      int headerIdx = varMap[varID];
                      gatherArray(root, nProcsModel, headerIdx,
                                  vlistID, data, &nmiss);
                      MPI_Request req;
                      MPI_Status stat;
                      xdebug("sending varID=%d for writing to"
                             " process %d",
                             varID, writerRank);
                      xmpi(MPI_Isend(&nmiss, 1, MPI_INT,
                                     writerRank, COLLBUFNMISS,
                                     collComm, &req));
                      xmpi(MPI_Send(data, size, MPI_DOUBLE,
                                    writerRank, COLLBUFTX,
                                    collComm));
                      xmpiStat(MPI_Wait(&req, &stat), &stat);
                    }
            }
#endif
            break;
#endif
          default:
            xabort("unhandled filetype in parallel I/O.");
          }
      }
#ifdef HAVE_NETCDF4
    free(varIsWritten);
#ifdef HAVE_PARALLEL_NC4
    free(writeBuf);
#endif
#endif
    free(map.entries);
    free(data);
  }
  xdebug("%s", "RETURN");
} 
Example no. 12
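/* Gather one variable from all model processes into gatherBuf: unpack each
 * process's index-list description from its RMA window, build a yaxt
 * redistribution from those pieces onto the full variable shape and
 * accumulate the per-process nmiss counts. */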
static void
gatherArray(int root, int nProcsModel, int headerIdx,
            int vlistID,
            double *gatherBuf, int *nmiss)
{
  struct winHeaderEntry *winDict
    = (struct winHeaderEntry *)rxWin[root].buffer;
  int streamID = winDict[headerIdx].id;
  int varID = winDict[headerIdx].specific.dataRecord.varID;
  int varShape[3] = { 0, 0, 0 };
  cdiPioQueryVarDims(varShape, vlistID, varID);
  Xt_int varShapeXt[3];
  static const Xt_int origin[3] = { 0, 0, 0 };
  for (unsigned i = 0; i < 3; ++i)
    varShapeXt[i] = varShape[i];
  int varSize = varShape[0] * varShape[1] * varShape[2];
  struct Xt_offset_ext *partExts
    = xmalloc((size_t)nProcsModel * sizeof (partExts[0]));
  Xt_idxlist *part = xmalloc((size_t)nProcsModel * sizeof (part[0]));
  MPI_Comm commCalc = commInqCommCalc();
  {
    int nmiss_ = 0;
    for (int modelID = 0; modelID < nProcsModel; modelID++)
      {
        struct dataRecord *dataHeader
          = &((struct winHeaderEntry *)
              rxWin[modelID].buffer)[headerIdx].specific.dataRecord;
        int position =
          ((struct winHeaderEntry *)rxWin[modelID].buffer)[headerIdx + 1].offset;
        xassert(namespaceAdaptKey2(((struct winHeaderEntry *)
                                    rxWin[modelID].buffer)[headerIdx].id)
                == streamID
                && dataHeader->varID == varID
                && ((struct winHeaderEntry *)
                    rxWin[modelID].buffer)[headerIdx + 1].id == PARTDESCMARKER
                && position > 0
                && ((size_t)position
                    >= sizeof (struct winHeaderEntry) * (size_t)rxWin[modelID].dictSize)
                && ((size_t)position < rxWin[modelID].size));
        part[modelID] = xt_idxlist_unpack(rxWin[modelID].buffer,
                                          (int)rxWin[modelID].size,
                                          &position, commCalc);
        unsigned partSize = (unsigned)xt_idxlist_get_num_indices(part[modelID]);
        size_t charOfs = (size_t)((rxWin[modelID].buffer
                                   + ((struct winHeaderEntry *)
                                      rxWin[modelID].buffer)[headerIdx].offset)
                                  - rxWin[0].buffer);
        xassert(charOfs % sizeof (double) == 0
                && charOfs / sizeof (double) + partSize <= INT_MAX);
        int elemOfs = (int)(charOfs / sizeof (double));
        partExts[modelID].start = elemOfs;
        partExts[modelID].size = (int)partSize;
        partExts[modelID].stride = 1;
        nmiss_ += dataHeader->nmiss;
      }
    *nmiss = nmiss_;
  }
  Xt_idxlist srcList = xt_idxlist_collection_new(part, nProcsModel);
  for (int modelID = 0; modelID < nProcsModel; modelID++)
    xt_idxlist_delete(part[modelID]);
  free(part);
  Xt_xmap gatherXmap;
  {
    Xt_idxlist dstList
      = xt_idxsection_new(0, 3, varShapeXt, varShapeXt, origin);
    struct Xt_com_list full = { .list = dstList, .rank = 0 };
    gatherXmap = xt_xmap_intersection_new(1, &full, 1, &full, srcList, dstList,
                                          MPI_COMM_SELF);
    xt_idxlist_delete(dstList);
  }
  xt_idxlist_delete(srcList);

  struct Xt_offset_ext gatherExt = { .start = 0, .size = varSize, .stride = 1 };
  Xt_redist gatherRedist
    = xt_redist_p2p_ext_new(gatherXmap, nProcsModel, partExts, 1, &gatherExt,
                            MPI_DOUBLE);
  xt_xmap_delete(gatherXmap);
  xt_redist_s_exchange1(gatherRedist, rxWin[0].buffer, gatherBuf);
  free(partExts);
  xt_redist_delete(gatherRedist);
}

struct xyzDims
{
  int sizes[3];
};

static inline int
xyzGridSize(struct xyzDims dims)
{
  return dims.sizes[0] * dims.sizes[1] * dims.sizes[2];
}

#ifdef HAVE_PARALLEL_NC4
static void
queryVarBounds(struct PPM_extent varShape[3], int vlistID, int varID)
{
  varShape[0].first = 0;
  varShape[1].first = 0;
  varShape[2].first = 0;
  int sizes[3];
  cdiPioQueryVarDims(sizes, vlistID, varID);
  for (unsigned i = 0; i < 3; ++i)
    varShape[i].size = sizes[i];
}

/* compute distribution of collectors such that number of collectors
 * <= number of variable grid cells in each dimension */
static struct xyzDims
varDimsCollGridMatch(const struct PPM_extent varDims[3])
{
  xassert(PPM_extents_size(3, varDims) >= commInqSizeColl());
  struct xyzDims collGrid = { { 1, 1, 1 } };
  /* because of storage order, dividing dimension 3 first is preferred */
  for (int i = 0; i < numPioPrimes; ++i)
    {
      for (int dim = 2; dim >= 0; --dim)
        if (collGrid.sizes[dim] * pioPrimes[i] <= varDims[dim].size)
          {
            collGrid.sizes[dim] *= pioPrimes[i];
            goto nextPrime;
          }
      /* no position found, backtrack */
      xabort("Not yet implemented back-tracking needed.");
      nextPrime:
      ;
    }
  return collGrid;
}
Example no. 13
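/* Execute one RPC entry read from the window: depending on funcID, close a
 * stream, open a stream for writing, attach a vlist, or unpack the
 * transmitted taxis and define a new time step. */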
static void
readFuncCall(struct winHeaderEntry *header)
{
  int root = commInqRootGlob ();
  int funcID = header->id;
  union funcArgs *funcArgs = &(header->specific.funcArgs);

  xassert(funcID >= MINFUNCID && funcID <= MAXFUNCID);
  switch ( funcID )
    {
    case STREAMCLOSE:
      {
        int streamID
          = namespaceAdaptKey2(funcArgs->streamChange.streamID);
        streamClose(streamID);
        xdebug("READ FUNCTION CALL FROM WIN:  %s, streamID=%d,"
               " closed stream",
               funcMap[(-1 - funcID)], streamID);
      }
      break;
    case STREAMOPEN:
      {
        size_t filenamesz = (size_t)funcArgs->newFile.fnamelen;
        xassert ( filenamesz > 0 && filenamesz < MAXDATAFILENAME );
        const char *filename
          = (const char *)(rxWin[root].buffer + header->offset);
        xassert(filename[filenamesz] == '\0');
        int filetype = funcArgs->newFile.filetype;
        int streamID = streamOpenWrite(filename, filetype);
        xassert(streamID != CDI_ELIBNAVAIL);
        xdebug("READ FUNCTION CALL FROM WIN:  %s, filenamesz=%zu,"
               " filename=%s, filetype=%d, OPENED STREAM %d",
               funcMap[(-1 - funcID)], filenamesz, filename,
               filetype, streamID);
      }
      break;
    case STREAMDEFVLIST:
      {
        int streamID
          = namespaceAdaptKey2(funcArgs->streamChange.streamID);
        int vlistID = namespaceAdaptKey2(funcArgs->streamChange.vlistID);
        streamDefVlist(streamID, vlistID);
        xdebug("READ FUNCTION CALL FROM WIN:  %s, streamID=%d,"
               " vlistID=%d, called streamDefVlist ().",
               funcMap[(-1 - funcID)], streamID, vlistID);
      }
      break;
    case STREAMDEFTIMESTEP:
      {
        MPI_Comm commCalc = commInqCommCalc ();
        int streamID = funcArgs->streamNewTimestep.streamID;
        int originNamespace = namespaceResHDecode(streamID).nsp;
        streamID = namespaceAdaptKey2(streamID);
        int oldTaxisID
          = vlistInqTaxis(streamInqVlist(streamID));
        int position = header->offset;
        int changedTaxisID
          = taxisUnpack((char *)rxWin[root].buffer, (int)rxWin[root].size,
                        &position, originNamespace, &commCalc, 0);
        taxis_t *oldTaxisPtr = taxisPtr(oldTaxisID);
        taxis_t *changedTaxisPtr = taxisPtr(changedTaxisID);
        ptaxisCopy(oldTaxisPtr, changedTaxisPtr);
        taxisDestroy(changedTaxisID);
        streamDefTimestep(streamID, funcArgs->streamNewTimestep.tsID);
      }
      break;
    default:
      xabort ( "REMOTE FUNCTIONCALL NOT IMPLEMENTED!" );
    }
}
Example no. 14
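/* Main loop of an I/O server process: initialize the writing backend and
 * the namespace switches, then wait for messages from the compute
 * processes and handle FINALIZE (close all remaining streams and return
 * once every model process has finished), RESOURCES (unpack the resource
 * handles and create the RMA windows) and WRITETS (read the data for one
 * time step). */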
void cdiPioServer(void (*postCommSetupActions)(void))
{
  int nProcsModel = commInqNProcsModel();
  static int nfinished = 0;
  MPI_Comm commCalc;
  MPI_Status status;

  xdebug("%s", "START");

  cdiPioFileWritingInit(postCommSetupActions);
  if (commInqRankNode() == commInqSpecialRankNode())
    return;
  commCalc = commInqCommCalc ();
#ifdef HAVE_PARALLEL_NC4
  cdiPioEnableNetCDFParAccess();
  numPioPrimes = PPM_prime_factorization_32((uint32_t)commInqSizeColl(),
                                            &pioPrimes);
#elif defined (HAVE_LIBNETCDF)
  cdiSerialOpenFileCount = xcalloc(sizeof (cdiSerialOpenFileCount[0]),
                                   (size_t)commInqSizeColl());
  namespaceSwitchSet(NSSWITCH_STREAM_OPEN_BACKEND,
                     NSSW_FUNC(cdiPioStreamCDFOpenWrap));
  namespaceSwitchSet(NSSWITCH_STREAM_CLOSE_BACKEND,
                     NSSW_FUNC(cdiPioStreamCDFCloseWrap));
  namespaceSwitchSet(NSSWITCH_CDF_DEF_TIMESTEP,
                     NSSW_FUNC(cdiPioCdfDefTimestep));
  namespaceSwitchSet(NSSWITCH_CDF_STREAM_SETUP,
                     NSSW_FUNC(cdiPioServerCdfDefVars));
#endif
  namespaceSwitchSet(NSSWITCH_FILE_WRITE,
                     NSSW_FUNC(cdiPioFileWrite));

  for ( ;; )
    {
      xmpi ( MPI_Probe ( MPI_ANY_SOURCE, MPI_ANY_TAG, commCalc, &status ));
      
      int source = status.MPI_SOURCE;
      int tag = status.MPI_TAG;
      
      switch ( tag )
        {
        case FINALIZE:
          xdebugMsg(tag, source, nfinished);
          xmpi(MPI_Recv(NULL, 0, MPI_INT, source, tag, commCalc, &status));
          xdebug("%s", "RECEIVED MESSAGE WITH TAG \"FINALIZE\"");
          nfinished++;
          xdebug("nfinished=%d, nProcsModel=%d", nfinished, nProcsModel);
          if ( nfinished == nProcsModel )
            {
              {
                unsigned nStreams = reshCountType(&streamOps);

                if ( nStreams > 0 )
                  {
                    int *resHs = xmalloc(nStreams * sizeof (resHs[0]));
                    cdiStreamGetIndexList(nStreams, resHs);
                    for (unsigned streamNo = 0; streamNo < nStreams; ++streamNo)
                      streamClose(resHs[streamNo]);
                    free(resHs);
                  }
              }
              cdiPioFileWritingFinalize();
              serverWinCleanup();
#ifdef HAVE_PARALLEL_NC4
              free(pioPrimes);
#endif
              /* listDestroy(); */
              xdebug("%s", "RETURN");
              return;
            }
	  
          break;
          
	case RESOURCES:
          {
            int size;
            xdebugMsg(tag, source, nfinished);
            xmpi(MPI_Get_count(&status, MPI_CHAR, &size));
            char *buffer = xmalloc((size_t)size);
            xmpi(MPI_Recv(buffer, size, MPI_PACKED, source,
                          tag, commCalc, &status));
            xdebug("%s", "RECEIVED MESSAGE WITH TAG \"RESOURCES\"");
            reshUnpackResources(buffer, size, &commCalc);
            xdebug("%s", "");
            free(buffer);
            int rankGlob = commInqRankGlob();
            if ( ddebug > 0 && rankGlob == nProcsModel)
              {
                static const char baseName[] = "reshListIOServer.",
                  suffix[] = ".txt";
                /* 9 digits for rank at most */
                char buf[sizeof (baseName) + 9 + sizeof (suffix) + 1];
                snprintf(buf, sizeof (buf), "%s%d%s", baseName, rankGlob,
                         suffix);
                FILE *fp = fopen(buf, "w");
                xassert(fp);
                reshListPrint(fp);
                fclose(fp);
              }
          }
          serverWinCreate ();
	  break;
	case WRITETS:
          {
            xdebugMsg(tag, source, nfinished);
            xmpi(MPI_Recv(NULL, 0, MPI_INT, source,
                          tag, commCalc, &status));
            xdebug("RECEIVED MESSAGE WITH TAG \"WRITETS\": source=%d",
                   source);
            getTimeStepData();
          }
	  break;

	default:
	  xabort ( "TAG NOT DEFINED!" );
	}
    }
}