Example #1
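/*
 * check_object: validate that `ob` is a C-contiguous, native-byte-order
 * ndarray of the expected type before it is handed to a LAPACK routine.
 * Returns 1 on success; on failure sets LapackError and returns 0.
 */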
static int
check_object(PyObject *ob, int t, char *obname,
             char *tname, char *funname)
{
    if (!PyArray_Check(ob)) {
        PyErr_Format(LapackError,
                     "Expected an array for parameter %s in lapack_lite.%s",
                     obname, funname);
        return 0;
    }
    else if (!PyArray_IS_C_CONTIGUOUS((PyArrayObject *)ob)) {
        PyErr_Format(LapackError,
                     "Parameter %s is not contiguous in lapack_lite.%s",
                     obname, funname);
        return 0;
    }
    else if (PyArray_TYPE((PyArrayObject *)ob) != t) {
        PyErr_Format(LapackError,
                     "Parameter %s is not of type %s in lapack_lite.%s",
                     obname, tname, funname);
        return 0;
    }
    else if (PyArray_ISBYTESWAPPED((PyArrayObject *)ob)) {
        PyErr_Format(LapackError,
                     "Parameter %s has non-native byte order in lapack_lite.%s",
                     obname, funname);
        return 0;
    }
    else {
        return 1;
    }
}
Example #2
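/*
 * NCFormat_from_spMatrix: validate the CSC data/index arrays of a sparse
 * matrix and wrap them in a SuperLU compressed-column (SLU_NC) SuperMatrix.
 * Returns 0 on success, -1 with a Python exception set on failure.
 */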
int NCFormat_from_spMatrix(SuperMatrix * A, int m, int n, int nnz,
                           PyArrayObject * nzvals, PyArrayObject * rowind,
                           PyArrayObject * colptr, int typenum)
{
    int ok = 0;

    ok = (PyArray_EquivTypenums(PyArray_DESCR(nzvals)->type_num, typenum) &&
          PyArray_EquivTypenums(PyArray_DESCR(rowind)->type_num, NPY_INT) &&
          PyArray_EquivTypenums(PyArray_DESCR(colptr)->type_num, NPY_INT) &&
          PyArray_NDIM(nzvals) == 1 &&
          PyArray_NDIM(rowind) == 1 &&
          PyArray_NDIM(colptr) == 1 &&
          PyArray_IS_C_CONTIGUOUS(nzvals) &&
          PyArray_IS_C_CONTIGUOUS(rowind) &&
          PyArray_IS_C_CONTIGUOUS(colptr) &&
          nnz <= PyArray_DIM(nzvals, 0) &&
          nnz <= PyArray_DIM(rowind, 0) &&
          n+1 <= PyArray_DIM(colptr, 0));
    if (!ok) {
        PyErr_SetString(PyExc_ValueError,
                        "sparse matrix arrays must be 1-D C-contiguous and of proper "
                        "sizes and types");
        return -1;
    }

    if (setjmp(_superlu_py_jmpbuf))
        return -1;
    else {
        if (!CHECK_SLU_TYPE(PyArray_TYPE(nzvals))) {
            PyErr_SetString(PyExc_TypeError, "Invalid type for array.");
            return -1;
        }
        Create_CompCol_Matrix(PyArray_TYPE(nzvals),
                              A, m, n, nnz, PyArray_DATA(nzvals),
                              (int *) PyArray_DATA(rowind), (int *) PyArray_DATA(colptr),
                              SLU_NC,
                              NPY_TYPECODE_TO_SLU(PyArray_TYPE(nzvals)),
                              SLU_GE);
    }

    return 0;
}
Example #3
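// convertToMDArrayRCP: view a NumPy array as a Domi::MDArrayRCP<T> that
// shares the array's data, preserving its dimensions, strides, and
// C/Fortran memory layout.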
Domi::MDArrayRCP< T >
convertToMDArrayRCP(PyArrayObject * pyArray)
{
  // Get the number of dimensions and initialize the dimensions and
  // strides arrays
  int numDims = PyArray_NDIM(pyArray);
  Teuchos::Array< Domi::dim_type  > dims(   numDims);
  Teuchos::Array< Domi::size_type > strides(numDims);

  // Set the dimensions and strides
  for (int axis = 0; axis < numDims; ++axis)
  {
    dims[   axis] = (Domi::dim_type ) PyArray_DIM(   pyArray, axis);
    strides[axis] = (Domi::size_type) PyArray_STRIDE(pyArray, axis);
  }

  // Get the data pointer and layout
  T * data = (T*) PyArray_DATA(pyArray);
  Domi::Layout layout = PyArray_IS_C_CONTIGUOUS(pyArray) ? Domi::C_ORDER :
    Domi::FORTRAN_ORDER;

  // Return the result
  return Domi::MDArrayRCP< T >(dims, strides, data, layout);
}
Example #4
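/*
 * ctc_cost_cpu: compute the CTC loss for a (time, batch, alphabet)
 * activations array, allocating the costs (and, unless disabled, the
 * gradients) output arrays as needed. Returns 0 on success, 1 with a
 * Python exception set on failure.
 */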
int APPLY_SPECIFIC(ctc_cost_cpu)(PyArrayObject *  in_activations,
                                 PyArrayObject *  in_labels,
                                 PyArrayObject *  in_input_lengths,
                                 PyArrayObject ** out_costs,
                                 PyArrayObject ** out_gradients)
{
    ctc_context_t ctc_object;
    ctc_context_t * context = &ctc_object;
    ctc_context_init( context );

    if ( !PyArray_IS_C_CONTIGUOUS( in_activations ) )
    {
        PyErr_SetString( PyExc_RuntimeError,
            "ConnectionistTemporalClassification: activations array must be C-contiguous." );
        return 1;
    }

    npy_float32 * activations = (npy_float32 *) PyArray_DATA( in_activations );

    create_contiguous_input_lengths( in_input_lengths, &(context->input_lengths) );

    if ( NULL == context->input_lengths )
    {
        // Destroy previous CTC context before returning exception
        ctc_context_destroy( context );

        PyErr_Format( PyExc_MemoryError,
            "ConnectionistTemporalClassification: Could not allocate memory for input lengths" );
        return 1;
    }

    // flatten labels to conform with library memory layout
    create_flat_labels( in_labels, &(context->flat_labels), &(context->label_lengths) );

    if ( ( NULL == context->label_lengths ) || ( NULL == context->flat_labels ) )
    {
        // Destroy previous CTC context before returning exception
        ctc_context_destroy( context );

        PyErr_Format( PyExc_MemoryError,
            "ConnectionistTemporalClassification: Could not allocate memory for labels and their lengths" );
        return 1;
    }

    npy_int minibatch_size = PyArray_DIMS( in_activations )[1];
    npy_int alphabet_size = PyArray_DIMS( in_activations )[2];

    npy_float32 * costs = NULL;
    npy_intp cost_size = minibatch_size;

    if ( (*out_costs) == NULL ||                       // Symbolic variable has no memory backing
         PyArray_NDIM( *out_costs ) != 1 ||            // or, matrix has the wrong size
         PyArray_DIMS( *out_costs )[0] != cost_size )
    {
        Py_XDECREF( *out_costs );
        // Allocate new matrix
        *out_costs = (PyArrayObject *) PyArray_ZEROS( 1, &cost_size, NPY_FLOAT32, 0 );

        if ( NULL == (*out_costs) )
        {
            // Destroy previous CTC context before returning exception
            ctc_context_destroy( context );

            PyErr_Format( PyExc_MemoryError,
                "ConnectionistTemporalClassification: Could not allocate memory for CTC costs" );
            return 1;
        }
    }

    costs = (npy_float32 *) PyArray_DATA( *out_costs );

    npy_float32 * gradients = NULL;

    if ( NULL != out_gradients )  // If gradient computation is not disabled
    {
        if ( NULL == (*out_gradients) ||  // Symbolic variable has no real backing
            PyArray_NDIM( *out_gradients ) != 3 ||
            PyArray_DIMS( *out_gradients )[0] != PyArray_DIMS( in_activations )[0] ||
            PyArray_DIMS( *out_gradients )[1] != PyArray_DIMS( in_activations )[1] ||
            PyArray_DIMS( *out_gradients )[2] != PyArray_DIMS( in_activations )[2] )
        {
            // Existing matrix is the wrong size. Make a new one.
            // Decrement ref counter to existing array
            Py_XDECREF( *out_gradients );
            // Allocate new array
            *out_gradients = (PyArrayObject *) PyArray_ZEROS(3, PyArray_DIMS( in_activations ),
                NPY_FLOAT32, 0);

            if ( NULL == (*out_gradients) )
            {
                // Destroy previous CTC context before returning exception
                ctc_context_destroy( context );

                PyErr_Format( PyExc_MemoryError,
                    "ConnectionistTemporalClassification: Could not allocate memory for CTC gradients!" );
                return 1;
            }
        }
        gradients = (npy_float32 *) PyArray_DATA( *out_gradients );
    }

    size_t cpu_workspace_size;
    int ctc_error;

    ctc_error = ctc_check_result( get_workspace_size( context->label_lengths,
        context->input_lengths, alphabet_size, minibatch_size, context->options,
        &cpu_workspace_size ),
        "Failed to obtain CTC workspace size." );

    if ( ctc_error )  // Exception is set by ctc_check_result, return error here
    {
        // Destroy previous CTC context before returning exception
        ctc_context_destroy( context );

        return 1;
    }

    context->workspace = malloc( cpu_workspace_size );

    if ( NULL == context->workspace )
    {
        // Destroy previous CTC context before returning exception
        ctc_context_destroy( context );

        PyErr_Format( PyExc_MemoryError,
            "ConnectionistTemporalClassification: Failed to allocate memory for CTC workspace." );
        return 1;
    }

    ctc_error = ctc_check_result( compute_ctc_loss( activations, gradients,
        context->flat_labels, context->label_lengths, context->input_lengths,
        alphabet_size, minibatch_size, costs, context->workspace,
        context->options ), "Failed to compute CTC loss function." );

    if ( ctc_error )  // Exception is set by ctc_check_result, return error here
    {
        ctc_context_destroy( context );

        return 1;
    }

    ctc_context_destroy( context );

    return 0;
}
Example #5
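/*
 * gpu_dimshuffle: apply a DimShuffle to a GPU array by transposing it with
 * pygpu_transpose and then reshaping with pygpu_reshape, inserting
 * broadcastable dimensions where new_order contains -1. Returns 0 on
 * success, 1 with a Python exception set on failure.
 */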
int gpu_dimshuffle(PyGpuArrayObject* input, PyGpuArrayObject** out, PARAMS_TYPE* params) {
    PyGpuArrayObject *tmp = NULL;
    npy_intp nd_in = PyArray_SIZE(params->input_broadcastable);
    npy_intp nd_out = PyArray_SIZE(params->_new_order);
    npy_int64* new_order = NULL;
    unsigned int* transposition = NULL;
    size_t* sh = NULL;
    int e;

    if (input->ga.nd != nd_in) {
        PyErr_SetString(PyExc_TypeError, "GpuDimShuffle: input has an unexpected number of dimensions.");
        return 1;
    }
    if (!PyArray_IS_C_CONTIGUOUS(params->_new_order)) {
        PyErr_SetString(PyExc_RuntimeError, "DimShuffle: param _new_order must be C-contiguous.");
        return 1;
    }
    if (!PyArray_IS_C_CONTIGUOUS(params->transposition)) {
        PyErr_SetString(PyExc_RuntimeError, "GpuDimShuffle: param transposition must be C-contiguous.");
        return 1;
    }

    Py_XDECREF(*out);

    /** Do shuffle. **/

    new_order = (npy_int64*) PyArray_DATA(params->_new_order);
    /* Type of params->transposition (npy_uint32) should be an alias of unsigned int
     * on platforms supported by Theano. */
    transposition = (unsigned int*) PyArray_DATA(params->transposition);
    sh = (size_t*) malloc(nd_out * sizeof(size_t));
    if (sh == NULL) {
        PyErr_NoMemory();
        return 1;
    }
    tmp = pygpu_transpose(input, transposition);
    if (!tmp) {
        free(sh);
        return 1;
    }
    e = 0;
    for (npy_intp i = 0; i < nd_out; ++i) {
        if (new_order[i] == -1) {
            sh[i] = 1;
        } else {
            sh[i] = tmp->ga.dimensions[e];
            ++e;
        }
    }
    *out = pygpu_reshape(tmp, nd_out, sh, GA_ANY_ORDER, 1, -1);
    Py_DECREF(tmp);
    free(sh);

    if (*out == NULL) {
        return 1;
    }

    /** End shuffle. **/

    if (!params->inplace) {
        tmp = pygpu_copy(*out, GA_ANY_ORDER);
        Py_DECREF(*out);
        if (!tmp) {
            *out = NULL;
            return 1;
        }
        *out = tmp;
    }

    return 0;
}