/*
 * array_push_for_alloc_and_attach(array) — size accounting for MPI_Buffer_attach.
 *
 * Maps the array to an MPI datatype, queries the per-element size with
 * MPI_Type_size, and grows the module-level `buf_size` accumulator by
 * nbytes*count plus MPI_BSEND_OVERHEAD, so a buffered-send pool can later
 * be allocated/attached of the right total size.
 *
 * NOTE(review): this definition appears truncated in this view — the final
 * success return and closing brace are not visible, so the code is left
 * byte-identical here.
 *
 * NOTE(review): `buf_size` is updated BEFORE the MPI_Type_size error check,
 * so on failure the accumulator has already absorbed a garbage `nbytes`
 * value — the update should move below the `if (error != 0)` guard; confirm
 * and fix where the full function is visible.
 */
static PyObject *array_push_for_alloc_and_attach(PyObject *self, PyObject *args) { PyArrayObject *array; int count; int nbytes; MPI_Datatype mpi_type; int error, myid; count = nbytes = error = 0; mpi_type = (MPI_Datatype) 0; myid = -1; /* Process the parameters. */ if (!PyArg_ParseTuple(args, "O", &array)) return NULL; /* Input check and determination of MPI type */ mpi_type = type_map(array, &count); if (!mpi_type) return NULL; /* Compute number of bytes. */ error = MPI_Type_size(mpi_type, &nbytes); buf_size += (nbytes * count + MPI_BSEND_OVERHEAD); if (error != 0) { MPI_Comm_rank(MPI_COMM_WORLD, &myid); PyErr_Format(PyExc_RuntimeError, "rank %d: array_push_for_alloc_and_attach: \
MPI_Type_size failed with return value %d", myid, error); return NULL; }
/*
 * gather_array(x, d, source) -> None
 *
 * Gather the contents of array x from every process into buffer d on
 * process `source` via MPI_Gather.  The caller is responsible for d
 * being large enough (count elements per process) and of x's type.
 * Raises RuntimeError (via rank_raise_mpi_runtime) on MPI failure.
 */
static PyObject *gather_array(PyObject *self, PyObject *args) {
  PyArrayObject *send_buf;
  PyArrayObject *recv_buf;
  int root, rc, count;
  MPI_Datatype dtype;

  /* Unpack (x, d, source) from Python. */
  if (!PyArg_ParseTuple(args, "OOi", &send_buf, &recv_buf, &root))
    return NULL;

  /* Input check and determination of MPI type. */
  dtype = type_map(send_buf, &count);
  if (!dtype)
    return NULL;

  /* Gather `count` elements from every process into recv_buf on `root`. */
  rc = MPI_Gather(send_buf->data, count, dtype,
                  recv_buf->data, count, dtype,
                  root, MPI_COMM_WORLD);
  if (rc != 0) {
    rank_raise_mpi_runtime(rc, "MPI_Gather");
    return NULL;
  }

  Py_INCREF(Py_None);
  return (Py_None);
}
/*
 * send_array(input, destination, tag) -> None
 *
 * Blocking-send the sequence `input` (converted to a contiguous Numpy
 * array at no cost if it already is one) to process `destination` with
 * message tag `tag`.  Raises on conversion failure, unsupported element
 * type, or MPI error.
 */
static PyObject *send_array(PyObject *self, PyObject *args) {
  PyObject *input;
  PyArrayObject *x;
  int destination, tag, error, count;
  MPI_Datatype mpi_type;

  /* Process the parameters. */
  if (!PyArg_ParseTuple(args, "Oii", &input, &destination, &tag))
    return NULL;

  /* Make Numpy array from general sequence type (no cost if already Numpy). */
  x = (PyArrayObject *) PyArray_ContiguousFromObject(input, NPY_NOTYPE, 0, 0);
  if (!x)
    return NULL;  /* fix: conversion can fail; error is already set */

  /* Input check and determination of MPI type. */
  mpi_type = type_map(x, &count);
  if (!mpi_type) {
    Py_DECREF(x);  /* fix: don't leak the contiguous copy on this path */
    return NULL;
  }

  /* Call the MPI routine. */
  error = MPI_Send(x->data, count, mpi_type, destination, tag, MPI_COMM_WORLD);
  Py_DECREF(x);

  if (error != 0) {
    rank_raise_mpi_runtime(error, "MPI_Send");
    return NULL;
  }

  Py_INCREF(Py_None);
  return (Py_None);
}
/*
 * reduce_array(x, d, length, op, source) -> int
 *
 * Reduce `length` elements of array x into buffer d on process `source`
 * using MPI operation code `op`.  Returns the raw MPI error code to
 * Python; MAXLOC/MINLOC are not implemented and yield the sentinel -666.
 */
static PyObject *reduce_array(PyObject *self, PyObject *args) {
  PyArrayObject *input_array;
  PyArrayObject *result_array;
  int length, source, op, rc;
  MPI_Datatype dtype;
  MPI_Op reduce_op;

  /* Unpack (x, d, length, op, source) from Python. */
  if (!PyArg_ParseTuple(args, "OOiii", &input_array, &result_array,
                        &length, &op, &source))
    return NULL;

  /* Input check and determination of MPI type. */
  dtype = type_map(input_array);
  if (!dtype)
    return NULL;

  /* Input check and determination of MPI op. */
  reduce_op = op_map(op);
  if (!reduce_op)
    return NULL;

  /* MAXLOC/MINLOC are not implemented — signal with sentinel value. */
  if (op == mpi_MAXLOC || op == mpi_MINLOC)
    return Py_BuildValue("i", -666);

  /* Call the MPI routine. */
  rc = MPI_Reduce(input_array->data, result_array->data, length,
                  dtype, reduce_op, source, MPI_COMM_WORLD);
  return Py_BuildValue("i", rc);
}
/*
 * receive_array(x, source, tag) -> (err, (source, tag, error, length))
 *
 * Blocking-receive into the caller-supplied array x from process
 * `source` with tag `tag`.  The receive count is x's first dimension.
 * Returns the MPI error code plus a status tuple.
 */
static PyObject *receive_array(PyObject *self, PyObject *args) {
  PyArrayObject *buffer;
  int sender, msg_tag, rc, received_count;
  MPI_Datatype dtype;
  MPI_Status status;

  /* Unpack (x, source, tag) from Python. */
  if (!PyArg_ParseTuple(args, "Oii", &buffer, &sender, &msg_tag))
    return NULL;

  /* Input check and determination of MPI type. */
  dtype = type_map(buffer);
  if (!dtype)
    return NULL;

  /* Call the MPI routine; receive directly into the caller's array. */
  rc = MPI_Recv(buffer->data, buffer->dimensions[0], dtype, sender,
                msg_tag, MPI_COMM_WORLD, &status);

  /* status.st_length is not available in all MPI implementations;
     MPI_Get_elements(MPI_Status *, MPI_Datatype, int *) is an alternative. */
  MPI_Get_count(&status, dtype, &received_count);

  return Py_BuildValue("i(iiii)", rc, status.MPI_SOURCE, status.MPI_TAG,
                       status.MPI_ERROR, received_count);
}
/*
 * send_array(input, destination, tag) -> int
 *
 * Blocking-send the sequence `input` (converted to a contiguous Numeric
 * array at no cost if it already is one) to process `destination` with
 * message tag `tag`.  The send count is the array's first dimension.
 * Returns the raw MPI error code to Python.
 */
static PyObject *send_array(PyObject *self, PyObject *args) {
  PyObject *input;
  PyArrayObject *x;
  int destination, tag, err;
  MPI_Datatype mpi_type;

  /* Process the parameters. */
  if (!PyArg_ParseTuple(args, "Oii", &input, &destination, &tag))
    return NULL;

  /* Make Numeric array from general sequence type (no cost if already Numeric). */
  x = (PyArrayObject *) PyArray_ContiguousFromObject(input, PyArray_NOTYPE, 0, 0);
  if (!x)
    return NULL;  /* fix: conversion can fail; error is already set */

  /* Input check and determination of MPI type. */
  mpi_type = type_map(x);
  if (!mpi_type) {
    Py_DECREF(x);  /* fix: don't leak the contiguous copy on this path */
    return NULL;
  }

  /* Call the MPI routine. */
  err = MPI_Send(x->data, x->dimensions[0], mpi_type, destination, tag,
                 MPI_COMM_WORLD);
  Py_DECREF(x);

  return Py_BuildValue("i", err);
}
/*
 * receive_array(x, source, tag) -> (source, tag, error, length, size)
 *
 * Blocking-receive into the caller-supplied array x from process
 * `source` with tag `tag`.  On success returns a status tuple with the
 * actual received element count and a best-effort per-element byte size.
 * Raises RuntimeError (via rank_raise_mpi_runtime) on MPI failure.
 */
static PyObject *receive_array(PyObject *self, PyObject *args) {
  PyArrayObject *buffer;
  int sender, msg_tag, rc, received, item_size, count;
  MPI_Datatype dtype;
  MPI_Status status;

  /* Unpack (x, source, tag) from Python. */
  if (!PyArg_ParseTuple(args, "Oii", &buffer, &sender, &msg_tag))
    return NULL;

  /* Input check and determination of MPI type. */
  dtype = type_map(buffer, &count);
  if (!dtype)
    return NULL;

  /* Call the MPI routine; x must NOT be DECREFed — it is returned to Python. */
  rc = MPI_Recv(buffer->data, count, dtype, sender, msg_tag,
                MPI_COMM_WORLD, &status);
  if (rc != 0) {
    rank_raise_mpi_runtime(rc, "MPI_Recv");
    return NULL;
  }

  /* status.st_length is not available in all MPI implementations;
     MPI_Get_elements(MPI_Status *, MPI_Datatype, int *) is an alternative. */
  MPI_Get_count(&status, dtype, &received);

  /* FIXME: best-effort element size — might not be watertight on all
     platforms; a C equivalent to itemsize() is really needed here. */
  if (dtype == MPI_DOUBLE)
    item_size = sizeof(double);
  else if (dtype == MPI_LONG)
    item_size = sizeof(long);
  else if (dtype == MPI_FLOAT)
    item_size = sizeof(float);
  else if (dtype == MPI_INT)
    item_size = sizeof(int);
  else
    item_size = 4;

  return Py_BuildValue("(iiiii)", status.MPI_SOURCE, status.MPI_TAG,
                       status.MPI_ERROR, received, item_size);
}
/*
 * bcast_array(x, source) -> int
 *
 * Broadcast array x (first-dimension elements) from process `source` to
 * all other processes in MPI_COMM_WORLD.  Returns the raw MPI error code.
 */
static PyObject *bcast_array(PyObject *self, PyObject *args) {
  PyArrayObject *buffer;
  int root, rc;
  MPI_Datatype dtype;

  /* Unpack (x, source) from Python. */
  if (!PyArg_ParseTuple(args, "Oi", &buffer, &root))
    return NULL;

  /* Input check and determination of MPI type. */
  dtype = type_map(buffer);
  if (!dtype)
    return NULL;

  /* Call the MPI routine. */
  rc = MPI_Bcast(buffer->data, buffer->dimensions[0], dtype, root,
                 MPI_COMM_WORLD);
  return Py_BuildValue("i", rc);
}
/*
 * gather_array(x, length, d, source) -> int
 *
 * Gather `length` elements of array x from every process into buffer d
 * on process `source`.  Returns the raw MPI error code to Python.
 */
static PyObject *gather_array(PyObject *self, PyObject *args) {
  PyArrayObject *send_buf;
  PyArrayObject *recv_buf;
  int length, root, rc;
  MPI_Datatype dtype;

  /* Unpack (x, length, d, source) from Python. */
  if (!PyArg_ParseTuple(args, "OiOi", &send_buf, &length, &recv_buf, &root))
    return NULL;

  /* Input check and determination of MPI type. */
  dtype = type_map(send_buf);
  if (!dtype)
    return NULL;

  /* Call the MPI routine. */
  rc = MPI_Gather(send_buf->data, length, dtype,
                  recv_buf->data, length, dtype,
                  root, MPI_COMM_WORLD);
  return Py_BuildValue("i", rc);
}
/*
 * reduce_array(x, d, op, source) -> None
 *
 * Reduce array x element-wise across all processes into buffer d on
 * process `source`, using the reduction operation code `op`.  x and d
 * must have the same element type and length.  Raises RuntimeError on
 * bad input, unsupported ops (MAXLOC/MINLOC), or MPI failure.
 */
static PyObject *reduce_array(PyObject *self, PyObject *args) {
  PyArrayObject *x;
  PyArrayObject *d;
  int source, op, error, count, count1;
  MPI_Datatype mpi_type, buffer_type;
  MPI_Op mpi_op;

  /* Process the parameters. */
  if (!PyArg_ParseTuple(args, "OOii", &x, &d, &op, &source)) {
    PyErr_SetString(PyExc_RuntimeError,
                    "mpiext.c (reduce_array): could not parse input");
    return NULL;
  }

  /* Input check and determination of MPI type. */
  mpi_type = type_map(x, &count);
  if (!mpi_type) {
    PyErr_SetString(PyExc_RuntimeError,
                    "mpiext.c (reduce_array): could not determine mpi_type");
    return NULL;
  }

  /* These errors are caught at the pypar level - so we won't end up
     here unless mpiext is being used independently. */
  buffer_type = type_map(d, &count1);
  if (!buffer_type) {
    /* fix: previously an unmappable buffer fell through to the misleading
       "must be of the same type" message below; report the real cause. */
    PyErr_SetString(PyExc_RuntimeError,
                    "mpiext.c (reduce_array): could not determine mpi_type of buffer");
    return NULL;
  }
  if (mpi_type != buffer_type) {
    PyErr_SetString(PyExc_RuntimeError,
                    "mpiext.c (reduce_array): Input array and buffer must be of the same type.");
    return NULL;
  }
  if (count != count1) {
    PyErr_SetString(PyExc_RuntimeError,
                    "mpiext.c (reduce_array): Input array and buffer must have same length");
    return NULL;
  }

  /* Input check and determination of MPI op. */
  mpi_op = op_map(op);
  if (!mpi_op) {
    PyErr_SetString(PyExc_RuntimeError,
                    "mpiext.c (reduce_array): could not determine mpi_op");
    return NULL;
  }

  if (op == MAXLOC || op == MINLOC) {
    PyErr_SetString(PyExc_RuntimeError,
                    "mpiext.c (reduce_array): MAXLOC and MINLOC are not implemented");
    return NULL;
  }

  /* Call the MPI routine. */
  error = MPI_Reduce(x->data, d->data, count, mpi_type, mpi_op, source,
                     MPI_COMM_WORLD);
  if (error != 0) {
    rank_raise_mpi_runtime(error, "MPI_Reduce");
    return NULL;
  }

  Py_INCREF(Py_None);
  return (Py_None);
}