GrB_Info GrB_Monoid_free // free a user-created monoid ( GrB_Monoid *monoid // handle of monoid to free ) { if (monoid != NULL) { GrB_Monoid mon = *monoid ; if (mon != NULL && mon->object_kind == GB_USER_RUNTIME) { if (mon->magic == GB_MAGIC) { // only user-defined monoids are freed. predefined monoids // are statically allocated and cannot be freed. mon->magic = GB_FREED ; // to help detect dangling pointers // mon->op->ztype->size might not be safe if op or ztype are // user-defined and have already been freed; use op_ztype_size. GB_FREE_MEMORY (mon->identity, 1, mon->op_ztype_size) ; GB_FREE_MEMORY (*monoid, 1, sizeof (struct GB_Monoid_opaque)) ; } (*monoid) = NULL ; } } return (GrB_SUCCESS) ; }
GrB_Info GrB_Descriptor_free // free a descriptor ( GrB_Descriptor *descriptor // handle of descriptor to free ) { if (descriptor != NULL) { GrB_Descriptor desc = *descriptor ; if (desc != NULL && desc->magic == GB_MAGIC) { desc->magic = GB_FREED ; // to help detect dangling pointers GB_FREE_MEMORY (*descriptor, 1, sizeof (struct GB_Descriptor_opaque)) ; } (*descriptor) = NULL ; } return (GrB_SUCCESS) ; }
GrB_Info GB_free // free a matrix ( GrB_Matrix *matrix // handle of matrix to free ) { if (matrix != NULL) { GrB_Matrix A = *matrix ; if (A != NULL && (A->magic == GB_MAGIC || A->magic == GB_MAGIC2)) { // free all content of A, including the Sauna GB_CONTENT_FREE (A) ; // free the header of A itself A->magic = GB_FREED ; // to help detect dangling pointers GB_FREE_MEMORY (*matrix, 1, sizeof (struct GB_Matrix_opaque)) ; } (*matrix) = NULL ; } return (GrB_SUCCESS) ; }
GrB_Info GrB_BinaryOp_free // free a user-created binary operator ( GrB_BinaryOp *binaryop // handle of binary operator to free ) { if (binaryop != NULL) { // only free a run-time user-defined operator GrB_BinaryOp op = *binaryop ; if (op != NULL && op->opcode == GB_USER_R_opcode) { if (op->magic == GB_MAGIC) { op->magic = GB_FREED ; // to help detect dangling pointers GB_FREE_MEMORY (*binaryop, 1, sizeof (struct GB_BinaryOp_opaque)) ; } (*binaryop) = NULL ; } } return (GrB_SUCCESS) ; }
// GB_to_hyper: convert a matrix from standard to hypersparse form, in place.
// On input A may be standard or already hypersparse; on output A->is_hyper is
// true.  The row indices (A->i) and values (A->x) are not touched, so a
// jumbled matrix is handled correctly.  On out-of-memory the matrix content
// is freed and an error is returned.

GrB_Info GB_to_hyper            // convert a matrix to hypersparse
(
    GrB_Matrix A,               // matrix to convert to hypersparse
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    // GB_subref_numeric can return a matrix with jumbled columns, since it
    // will soon be transposed (and sorted) in GB_accum_mask.  However, it
    // passes the jumbled matrix to GB_to_hyper_conform.  This function does
    // not access the row indices at all, so it works fine if the columns have
    // jumbled row indices.

    ASSERT_OK_OR_JUMBLED (GB_check (A, "A converting to hypersparse", GB0)) ;

    #ifndef NDEBUG
    GrB_Info info ;             // used only to check GB_jappend in debug builds
    #endif

    //--------------------------------------------------------------------------
    // convert A to hypersparse form
    //--------------------------------------------------------------------------

    if (!A->is_hyper)
    {
        // A is in standard form: no hyperlist, and A->p has A->vdim+1 entries
        ASSERT (A->h == NULL) ;
        ASSERT (A->nvec == A->plen && A->plen == A->vdim) ;

        //----------------------------------------------------------------------
        // count the number of non-empty vectors in A
        //----------------------------------------------------------------------

        // keep the old vector pointers; they are read below to build the new
        // hyperlist, and freed at the end (unless shallow)
        int64_t *restrict Ap_old = A->p ;
        bool Ap_old_shallow = A->p_shallow ;

        int64_t n = A->vdim ;
        int64_t nvec_new = A->nvec_nonempty ;

        //----------------------------------------------------------------------
        // allocate the new A->p and A->h
        //----------------------------------------------------------------------

        int64_t *restrict Ap_new ;
        int64_t *restrict Ah_new ;
        GB_MALLOC_MEMORY (Ap_new, nvec_new+1, sizeof (int64_t)) ;
        GB_MALLOC_MEMORY (Ah_new, nvec_new, sizeof (int64_t)) ;

        if (Ap_new == NULL || Ah_new == NULL)
        {
            // out of memory: free everything and return the matrix as empty
            A->is_hyper = true ;    // A is hypersparse, but otherwise invalid
            GB_FREE_MEMORY (Ap_new, nvec_new+1, sizeof (int64_t)) ;
            GB_FREE_MEMORY (Ah_new, nvec_new, sizeof (int64_t)) ;
            GB_CONTENT_FREE (A) ;
            return (GB_OUT_OF_MEMORY (GBYTES (2*nvec_new+1, sizeof (int64_t)))) ;
        }

        //----------------------------------------------------------------------
        // transplant the new A->p and A->h into the matrix
        //----------------------------------------------------------------------

        // this must be done here so that GB_jappend, just below, can be used.

        A->is_hyper = true ;
        A->plen = nvec_new ;
        A->nvec = 0 ;           // GB_jappend increments this as vectors append

        A->p = Ap_new ;
        A->h = Ah_new ;
        A->p_shallow = false ;
        A->h_shallow = false ;

        //----------------------------------------------------------------------
        // construct the new hyperlist in the new A->p and A->h
        //----------------------------------------------------------------------

        int64_t jlast, anz, anz_last ;
        GB_jstartup (A, &jlast, &anz, &anz_last) ;

        // scan the old vector pointers; GB_jappend appends only the non-empty
        // vectors to the new hyperlist
        for (int64_t j = 0 ; j < n ; j++)
        {
            // anz is the cumulative entry count through vector j (Ap_old [j+1])
            anz = Ap_old [j+1] ;
            ASSERT (A->nvec <= A->plen) ;
            #ifndef NDEBUG
            info =
            #endif
            GB_jappend (A, j, &jlast, anz, &anz_last, Context) ;
            // GB_jappend cannot fail here: A->plen == nvec_nonempty, so no
            // reallocation is needed
            ASSERT (info == GrB_SUCCESS) ;
            ASSERT (A->nvec <= A->plen) ;
        }

        GB_jwrapup (A, jlast, anz) ;
        ASSERT (A->nvec == nvec_new) ;
        ASSERT (A->nvec_nonempty == nvec_new) ;

        //----------------------------------------------------------------------
        // free the old A->p unless it's shallow
        //----------------------------------------------------------------------

        // this cannot use GB_ph_free because the new A->p content has already
        // been placed into A, as required by GB_jappend just above.

        if (!Ap_old_shallow)
        {
            GB_FREE_MEMORY (Ap_old, n+1, sizeof (int64_t)) ;
        }
    }

    //--------------------------------------------------------------------------
    // A is now in hypersparse form
    //--------------------------------------------------------------------------

    ASSERT_OK_OR_JUMBLED (GB_check (A, "A converted to hypersparse", GB0)) ;
    ASSERT (A->is_hyper) ;
    return (GrB_SUCCESS) ;
}