Example 1
/*
 * Python bulkload classmethod: builds a sorted btree from an iterable
 * whose items must already be in sorted (strictly increasing) order.
 */
static PyObject *
python_sorted_btree_bulkload(PyObject *klass, PyObject *args) {
    int i, result;
    PyObject *item_list, *order, *iter, *item, *prev = NULL;

    if (!PyArg_ParseTuple(args, "OO!", &item_list, &PyInt_Type, &order))
        return NULL;

    /* First pass: verify the items are in strictly increasing order. */
    if ((iter = PyObject_GetIter(item_list)) == NULL)
        return NULL;
    for (i = 0, item = PyIter_Next(iter); item;
            ++i, item = PyIter_Next(iter)) {
        if (i) {
            result = PyObject_RichCompareBool(prev, item, Py_LT);
            if (result <= 0) {
                if (!result)
                    PyErr_SetString(PyExc_ValueError,
                            "the bulkloaded list must already be sorted");
                Py_DECREF(prev);
                Py_DECREF(item);
                Py_DECREF(iter);
                return NULL;
            }
            Py_DECREF(prev);
        }
        prev = item;
    }
    if (prev) Py_DECREF(prev);
    Py_DECREF(iter);
    /* PyIter_Next() reports errors by returning NULL with an exception
     * set, so check for a pending exception before continuing. */
    if (PyErr_Occurred())
        return NULL;

    /* Second pass: hand a fresh iterator to the C-level bulkload routine
     * (assumed here to consume the iterator reference). */
    if ((iter = PyObject_GetIter(item_list)) == NULL)
        return NULL;

    btsort_pyobject *tree = PyObject_GC_New(
            btsort_pyobject, &btsort_pytypeobj);
    if (tree == NULL) {
        Py_DECREF(iter);
        return NULL;
    }

    if (bulkload(tree, iter, (int)PyInt_AsLong(order))) {
        /* Assumes bulkload() leaves the tree safe to deallocate on failure. */
        Py_DECREF((PyObject *)tree);
        return NULL;
    }

    return (PyObject *)tree;
}
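
For context, a function with this (klass, args) calling convention is normally wired up through the type's method table with METH_CLASS so that Python code can invoke it as a classmethod. The entry below is a minimal sketch under that assumption; the exposed name, the table name, and the docstring are illustrative and not taken from the original module.

/*
 * Hypothetical registration sketch (names are assumptions); only the
 * METH_VARARGS | METH_CLASS flags are implied by the code above.
 */
static PyMethodDef btsort_methods[] = {
    {"bulkload", (PyCFunction)python_sorted_btree_bulkload,
     METH_VARARGS | METH_CLASS,
     "bulkload(sorted_items, order) -> new sorted btree"},
    {NULL, NULL, 0, NULL}   /* sentinel */
};

Such a table would then be referenced from the tp_methods slot of btsort_pytypeobj (Python 2 API, consistent with the PyInt_Type usage above).
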
Example 2
int main(int argc, char **argv)
{
    mesh_t *mesh;
    double double_message[5];
    double x, y, z, factor = 0, vscut = 0;
    double elapsedtime;
#ifndef NO_OUTPUT
    int32_t eindex;
    int32_t remains, batch, idx;
    mrecord_t *partTable;
#endif

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &myID);
    MPI_Comm_size(MPI_COMM_WORLD, &theGroupSize);

    /* Read command-line arguments */
    if (argc != 5) {
	if (myID == 0) {
	    fprintf(stderr, "usage: qmesh cvmdb physics.in numerical.in ");
	    fprintf(stderr, "meshdb\n");
	    fprintf(stderr,
		    "cvmdb: path to an etree database or a flat file.\n");
	    fprintf(stderr, "physics.in: path to physics.in.\n");
	    fprintf(stderr, "numerical.in: path to numerical.in.\n");
	    fprintf(stderr, "meshetree: path to the output mesh etree.\n");
	    fprintf(stderr, "\n");
	}
	MPI_Finalize();
	return -1;
    }

    /* Change the working directory to $LOCAL */
    /*
    localpath = getenv("LOCAL");
    if (localpath == NULL) {
        fprintf(stderr, "Thread %d: Cannot get $LOCAL value\n", myID);
        MPI_Abort(MPI_COMM_WORLD, ERROR);
        exit(1);
    }

    if (chdir(localpath) != 0) {
        fprintf(stderr, "Thread %d: Cannot chdir to %s\n", myID, localpath);
        MPI_Abort(MPI_COMM_WORLD, ERROR);
        exit(1);
    }
    */

    /* Replicate the material database among the processors */
    /*

    if ((theGroupSize - 1) / PROCPERNODE >= 1) {

        MPI_Comm replica_comm;

        if (myID % PROCPERNODE != 0) {
            MPI_Comm_split(MPI_COMM_WORLD, MPI_UNDEFINED, myID, &replica_comm);

        } else {
            int replica_id;
            off_t filesize, remains, batchsize;
            void *filebuf;
            int fd;

            MPI_Comm_split(MPI_COMM_WORLD, 0, myID, &replica_comm);
            MPI_Comm_rank(replica_comm, &replica_id);

            if (replica_id == 0) {

                struct stat statbuf;


                if (stat(argv[1], &statbuf) != 0) {
                    fprintf(stderr, "Thread 0: Cannot get stat of %s\n",
                            argv[1]);
                    MPI_Abort(MPI_COMM_WORLD, ERROR);
                    exit(1);
                }

                filesize = statbuf.st_size;
            }

            MPI_Bcast(&filesize, sizeof(off_t), MPI_CHAR, 0, replica_comm);

            if ((filebuf = malloc(FILEBUFSIZE)) == NULL) {
                fprintf(stderr, "Thread %d: run out of memory while ", myID);
                fprintf(stderr, "preparing to receive material database\n");
                MPI_Abort(MPI_COMM_WORLD, ERROR);
                exit(1);
            }

	    fd = (replica_id == 0) ?
	    open(argv[1], O_RDONLY) :
                open(argv[1], O_CREAT|O_TRUNC|O_WRONLY, S_IRUSR|S_IWUSR);

            if (fd == -1) {
                fprintf(stderr, "Thread %d: Cannot create replica database\n",
                        myID);
                perror("open");
                MPI_Abort(MPI_COMM_WORLD, ERROR);
                exit(1);
            }

            remains = filesize;
            while (remains > 0) {
                batchsize = (remains > FILEBUFSIZE) ? FILEBUFSIZE : remains;

                if (replica_id == 0) {
                    if (read(fd, filebuf, batchsize) !=  batchsize) {
                        fprintf(stderr, "Thread 0: Cannot read database\n");
                        perror("read");
                        MPI_Abort(MPI_COMM_WORLD, ERROR);
                        exit(1);
                    }
                }

                MPI_Bcast(filebuf, batchsize, MPI_CHAR, 0, replica_comm);

                if (replica_id != 0) {
                    if (write(fd, filebuf, batchsize) != batchsize) {
                        fprintf(stderr, "Thread %d: Cannot write replica ",
                                myID);
                        fprintf(stderr, "database\n");
                        MPI_Abort(MPI_COMM_WORLD, ERROR);
                        exit(1);
                    }
                }

                remains -= batchsize;
		}

            if (close(fd) != 0) {
                fprintf(stderr, "Thread %d: cannot close replica database\n",
                        myID);
                perror("close");
                MPI_Abort(MPI_COMM_WORLD, ERROR);
                exit(1);
            }
	    }

        MPI_Barrier(MPI_COMM_WORLD);

	}

    */



    /* Initialize static global variables */
    if (myID == 0) {
	/* Processor 0 reads the parameters */
	if (initparameters(argv[2], argv[3], &x, &y, &z) != 0) {
	    fprintf(stderr, "Thread %d: Cannot init parameters\n", myID);
	    MPI_Abort(MPI_COMM_WORLD, ERROR);
	    exit(1);
	}

	factor = theFactor;
	vscut = theVsCut;
	double_message[0] = x;
	double_message[1] = y;
	double_message[2] = z;
	double_message[3] = factor;
	double_message[4] = vscut;

	/*
          fprintf(stderr, "&double_message[0] = %p\n", &double_message[0]);
          fprintf(stderr, "&double_message[4] = %p\n", &double_message[4]);
	  fprintf(stderr, "Thread 0: %f %f %f %f %f\n", x, y, z, factor, vscut);
        */
    }

    MPI_Bcast(double_message, 5, MPI_DOUBLE, 0, MPI_COMM_WORLD);

    x = double_message[0];
    y = double_message[1];
    z = double_message[2];
    factor = double_message[3];
    vscut  = double_message[4];

    theNorth_m = x;
    theEast_m = y;
    theDepth_m = z;

    /*
      printf("Thread %d: %f %f %f %f %f\n", myID, x, y, z, factor, vscut);
    */

    theFactor = factor;
    theVsCut = vscut;

    MPI_Barrier(MPI_COMM_WORLD);
    elapsedtime = -MPI_Wtime();


    if (myID == 0) {
	fprintf(stdout, "PE = %d, Freq = %.2f\n", theGroupSize, theFreq);
	fprintf(stdout, "-----------------------------------------------\n");
    }


    /*----  Generate and partition an unstructured octree mesh ----*/
    if (myID == 0) {
	fprintf(stdout, "octor_newtree ... ");
    }

    /*
     * RICARDO: Careful with the octor_newtree parameters (cutoff_depth)
     */

    myOctree = octor_newtree(x, y, z, sizeof(edata_t), myID, theGroupSize, MPI_COMM_WORLD, 0);
    if (myOctree == NULL) {
	fprintf(stderr, "Thread %d: fail to create octree\n", myID);
	MPI_Abort(MPI_COMM_WORLD, ERROR);
	exit(1);
    }
    MPI_Barrier(MPI_COMM_WORLD);

    elapsedtime += MPI_Wtime();

    if (myID == 0) {
	fprintf(stdout, "done.... %.2f seconds\n", elapsedtime);
    }

#ifdef USECVMDB
    /* Open my copy of the material database */
    theCVMEp = etree_open(argv[1], O_RDONLY, CVMBUFSIZE, 0, 0);
    if (!theCVMEp) {
	fprintf(stderr, "Thread %d: Cannot open CVM etree database %s\n",
		myID, argv[1]);
	MPI_Abort(MPI_COMM_WORLD, ERROR);
	exit(1);
    }

#else

    /* Use a flat data record file and distribute the data in memory */
    elapsedtime = -MPI_Wtime();
    if (myID == 0) {
	fprintf(stdout, "slicing CVM database ...");
    }

    theCVMRecord = sliceCVM(argv[1]);

    MPI_Barrier(MPI_COMM_WORLD);
    elapsedtime += MPI_Wtime();
    if (theCVMRecord == NULL) {
	fprintf(stderr, "Thread %d: Error obtaining the CVM records from %s\n",
		myID, argv[1]);
	MPI_Abort(MPI_COMM_WORLD, ERROR);
	exit(1);
    }
    if (myID == 0) {
	fprintf(stdout, "done.... %.2f seconds\n", elapsedtime);
    }

#endif

    elapsedtime = -MPI_Wtime();
    if (myID == 0) {
	fprintf(stdout, "octor_refinetree ...");
    }
    if (octor_refinetree(myOctree, toexpand, setrec) != 0) {
	fprintf(stderr, "Thread %d: fail to refine octree\n", myID);
	MPI_Abort(MPI_COMM_WORLD, ERROR);
	exit(1);
    }

    MPI_Barrier(MPI_COMM_WORLD);
    elapsedtime += MPI_Wtime();

    if (myID == 0) {
	fprintf(stdout, "done.... %.2f seconds\n", elapsedtime);
    }


    elapsedtime = -MPI_Wtime();
    if (myID == 0) {
	fprintf(stdout, "octor_balancetree ... ");
    }
    if (octor_balancetree(myOctree, setrec, 0) != 0) { /* no progressive meshing (ricardo) */
	fprintf(stderr, "Thread %d: fail to balance octree\n", myID);
	MPI_Abort(MPI_COMM_WORLD, ERROR);
	exit(1);
    }
    MPI_Barrier(MPI_COMM_WORLD);

    elapsedtime += MPI_Wtime();
    if (myID == 0) {
	fprintf(stdout, "done.... %.2f seconds\n", elapsedtime);
    }

#ifdef USECVMDB
    /* Close the material database */
    etree_close(theCVMEp);
#else
    free(theCVMRecord);

#endif /* USECVMDB */

    elapsedtime = -MPI_Wtime();
    if (myID == 0) {
	fprintf(stdout, "octor_partitiontree ...");
    }
    if (octor_partitiontree(myOctree, NULL) != 0) {
	fprintf(stderr, "Thread %d: fail to balance load\n", myID);
	MPI_Abort(MPI_COMM_WORLD, ERROR);
	exit(1);
    }
    MPI_Barrier(MPI_COMM_WORLD);

    elapsedtime += MPI_Wtime();
    if (myID == 0) {
	fprintf(stdout, "done.... %.2f seconds\n", elapsedtime);
    }

    elapsedtime = -MPI_Wtime();
    if (myID == 0) {
	fprintf(stdout, "octor_extractmesh ... ");
    }
    mesh = octor_extractmesh(myOctree, NULL);
    if (mesh == NULL) {
	fprintf(stderr, "Thread %d: fail to extract mesh\n", myID);
	MPI_Abort(MPI_COMM_WORLD, ERROR);
	exit(1);
    }
    MPI_Barrier(MPI_COMM_WORLD);

    elapsedtime += MPI_Wtime();
    if (myID == 0) {
	fprintf(stdout, "done.... %.2f seconds\n", elapsedtime);
    }

    /* We can do without the octree now */
    octor_deletetree(myOctree);


    /*---- Obtain and print the statistics of the mesh ----*/
    if (myID == 0) {
	int64_t etotal, ntotal, dntotal;
	int32_t received, procid;
	int32_t *enumTable, *nnumTable, *dnnumTable;
	int32_t rcvtrio[3];

	/* Allocate the arrays to hold the statistics */
	enumTable = (int32_t *)malloc(sizeof(int32_t) * theGroupSize);
	nnumTable = (int32_t *)malloc(sizeof(int32_t) * theGroupSize);
	dnnumTable = (int32_t *)malloc(sizeof(int32_t) * theGroupSize);

	if ((enumTable == NULL) ||
	    (nnumTable == NULL) ||
	    (dnnumTable == NULL)) {
	    fprintf(stderr, "Thread 0: out of memory\n");
	    MPI_Abort(MPI_COMM_WORLD, ERROR);
	    exit(1);
	}

	/* Fill in my counts */
	enumTable[0] = mesh->lenum;
	nnumTable[0] = mesh->lnnum;
	dnnumTable[0] = mesh->ldnnum;

	/* Initialize sums */
	etotal = mesh->lenum;
	ntotal = mesh->lnnum;
	dntotal = mesh->ldnnum;

	/* Fill in the rest of the tables */
	received = 0;
	while (received < theGroupSize - 1) {
	    int32_t fromwhom;
	    MPI_Status status;

	    MPI_Probe(MPI_ANY_SOURCE, STAT_MSG, MPI_COMM_WORLD, &status);

	    fromwhom = status.MPI_SOURCE;

	    MPI_Recv(rcvtrio, 3, MPI_INT, fromwhom, STAT_MSG, MPI_COMM_WORLD,
		     &status);

	    enumTable[fromwhom] = rcvtrio[0];
	    nnumTable[fromwhom] = rcvtrio[1];
	    dnnumTable[fromwhom] = rcvtrio[2];

	    etotal += rcvtrio[0];
	    ntotal += rcvtrio[1];
	    dntotal += rcvtrio[2];

	    received++;
	}

	fprintf(stdout, "Mesh statistics:\n");
	fprintf(stdout, "                 Elements     Nodes    Danglings\n");
#ifdef ALPHA_TRU64UNIX_CC
	fprintf(stdout, "Total     :   %10ld%10ld   %10ld\n\n",
		etotal, ntotal, dntotal);
	for (procid = 0; procid < theGroupSize; procid++) {
	    fprintf(stdout, "Proc %5d:   %10d%10d   %10d\n", procid,
		    enumTable[procid], nnumTable[procid], dnnumTable[procid]);
	}

#else
	fprintf(stdout, "Total      :    %10qd%10qd   %10qd\n\n",
		etotal, ntotal, dntotal);
	for (procid = 0; procid < theGroupSize; procid++) {
	    fprintf(stdout, "Proc %5d:   %10d%10d   %10d\n", procid,
		    enumTable[procid], nnumTable[procid], dnnumTable[procid]);
	}
#endif

	free(enumTable);
	free(nnumTable);
	free(dnnumTable);

    } else {
	int32_t sndtrio[3];

	sndtrio[0] = mesh->lenum;
	sndtrio[1] = mesh->lnnum;
	sndtrio[2] = mesh->ldnnum;

	MPI_Send(sndtrio, 3, MPI_INT, 0, STAT_MSG, MPI_COMM_WORLD);
    }

#ifndef NO_OUTPUT

    /*---- Join elements and nodes, and send to Thread 0 for output ----*/

    /* Allocate a fixed size buffer space to store the join results */
    partTable = (mrecord_t *)calloc(BATCH, sizeof(mrecord_t));
    if (partTable == NULL) {
        fprintf(stderr, "Thread %d: out of memory\n", myID);
	MPI_Abort(MPI_COMM_WORLD, ERROR);
	exit(1);
    }

    if (myID == 0) {
	char *mEtree;
	etree_t *mep;
	int32_t procid;

	mEtree = argv[4];
	mep = etree_open(mEtree, O_CREAT|O_RDWR|O_TRUNC, 0, sizeof(mdata_t),3);
	if (mep == NULL) {
	    fprintf(stderr, "Thread 0: cannot create mesh etree\n");
	    MPI_Abort(MPI_COMM_WORLD, ERROR);
	    exit(1);
	}

	/* Begin an appending operation */
	if (etree_beginappend(mep, 1) != 0) {
	    fprintf(stderr, "Thread 0: cannot begin an append operation\n");
	    MPI_Abort(MPI_COMM_WORLD, ERROR);
	    exit(1);
	}

	eindex = 0;
	while (eindex < mesh->lenum) {
	    remains = mesh->lenum - eindex;
	    batch = (remains < BATCH) ? remains : BATCH;

	    for (idx = 0; idx < batch; idx++) {
		mrecord_t *mrecord;
		int32_t whichnode;
		int32_t localnid0;

		mrecord = &partTable[idx];

		/* Fill the address field */
		localnid0 = mesh->elemTable[eindex].lnid[0];

		mrecord->addr.x = mesh->nodeTable[localnid0].x;
		mrecord->addr.y = mesh->nodeTable[localnid0].y;
		mrecord->addr.z = mesh->nodeTable[localnid0].z;
		mrecord->addr.level = mesh->elemTable[eindex].level;
		mrecord->addr.type = ETREE_LEAF;

		/* Find the global node ids for the vertices */
		for (whichnode = 0; whichnode < 8; whichnode++) {
		    int32_t localnid;
		    int64_t globalnid;

		    localnid = mesh->elemTable[eindex].lnid[whichnode];
		    globalnid = mesh->nodeTable[localnid].gnid;

		    mrecord->mdata.nid[whichnode] = globalnid;
		}

		memcpy(&mrecord->mdata.edgesize, mesh->elemTable[eindex].data,
		       sizeof(edata_t));

		eindex++;
	    } /* for a batch */

	    if (bulkload(mep, partTable, batch) != 0) {
		fprintf(stderr, "Thread 0: Error bulk-loading data\n");
		MPI_Abort(MPI_COMM_WORLD, ERROR);
		exit(1);
	    }
	} /* for all the elements Thread 0 has */

	/* Receive data from other processors */
	for (procid = 1; procid < theGroupSize; procid++) {
	    MPI_Status status;
	    int32_t rcvbytecount;

	    /* Signal the next processor to go ahead */
	    MPI_Send(NULL, 0, MPI_CHAR, procid, GOAHEAD_MSG, MPI_COMM_WORLD);

	    while (1) {
		MPI_Probe(procid, MESH_MSG, MPI_COMM_WORLD, &status);
		MPI_Get_count(&status, MPI_CHAR, &rcvbytecount);

		batch = rcvbytecount / sizeof(mrecord_t);
		if (batch == 0) {
		    /* Done */
		    break;
		}

		MPI_Recv(partTable, rcvbytecount, MPI_CHAR, procid,
			 MESH_MSG, MPI_COMM_WORLD, &status);

		if (bulkload(mep, partTable, batch) != 0) {
		    fprintf(stderr, "Thread 0: Cannot bulk-load data from ");
		    fprintf(stderr, "Thread %d\n", procid);
		    MPI_Abort(MPI_COMM_WORLD, ERROR);
		    exit(1);
		}
	    } /* while there is more data to be received from procid */
	} /* for all the processors */

	/* End the appending operation */
	etree_endappend(mep);

	/* Close the mep to ensure the data is on disk */
	if (etree_close(mep) != 0) {
	    fprintf(stderr, "Thread 0: Cannot close the etree database\n");
	    MPI_Abort(MPI_COMM_WORLD, ERROR);
	    exit(1);
	}

    } else {
        /* Processors other than 0 need to send data to 0 */
	int32_t sndbytecount;
	MPI_Status status;

	/* Wait for my turn */
	MPI_Recv(NULL, 0, MPI_CHAR, 0, GOAHEAD_MSG, MPI_COMM_WORLD, &status);

	eindex = 0;
	while (eindex < mesh->lenum) {
	    remains = mesh->lenum - eindex;
	    batch = (remains < BATCH) ? remains : BATCH;

	    for (idx = 0; idx < batch; idx++) {
		mrecord_t *mrecord;
		int32_t whichnode;
		int32_t localnid0;

		mrecord = &partTable[idx];

		/* Fill the address field */
		localnid0 = mesh->elemTable[eindex].lnid[0];

		mrecord->addr.x = mesh->nodeTable[localnid0].x;
		mrecord->addr.y = mesh->nodeTable[localnid0].y;
		mrecord->addr.z = mesh->nodeTable[localnid0].z;
		mrecord->addr.level = mesh->elemTable[eindex].level;
		mrecord->addr.type = ETREE_LEAF;

		/* Find the global node ids for the vertices */
		for (whichnode = 0; whichnode < 8; whichnode++) {
		    int32_t localnid;
		    int64_t globalnid;

		    localnid = mesh->elemTable[eindex].lnid[whichnode];
		    globalnid = mesh->nodeTable[localnid].gnid;

		    mrecord->mdata.nid[whichnode] = globalnid;
		}

		memcpy(&mrecord->mdata.edgesize, mesh->elemTable[eindex].data,
		       sizeof(edata_t));

		eindex++;
	    } /* for a batch */

	    /* Send data to proc 0 */
	    sndbytecount = batch * sizeof(mrecord_t);
	    MPI_Send(partTable, sndbytecount, MPI_CHAR, 0, MESH_MSG,
		     MPI_COMM_WORLD);
	} /* While there is data left to be sent */

	/* Send an empty message to indicate the end of my transfer */
	MPI_Send(NULL, 0, MPI_CHAR, 0, MESH_MSG, MPI_COMM_WORLD);
    }

    /* Free the memory for the partial join results */
    free(partTable);

#endif

    octor_deletemesh(mesh);

    MPI_Finalize();

    return 0;
}