GrB_Info GB_ijsort ( const GrB_Index *I, // index array of size ni int64_t *p_ni, // input: size of I, output: number of indices in I2 GrB_Index **p_I2, // output array of size ni, where I2 [0..ni2-1] // contains the sorted indices with duplicates removed. GB_Context Context ) { GrB_Index *I2 = NULL ; int64_t ni = *p_ni ; //-------------------------------------------------------------------------- // allocate the new list //-------------------------------------------------------------------------- GB_MALLOC_MEMORY (I2, ni, sizeof (GrB_Index)) ; if (I2 == NULL) { return (GB_OUT_OF_MEMORY (GBYTES (ni, sizeof (GrB_Index)))) ; } //-------------------------------------------------------------------------- // copy I into I2 and sort it //-------------------------------------------------------------------------- for (int64_t k = 0 ; k < ni ; k++) { I2 [k] = I [k] ; } GB_qsort_1 ((int64_t *) I2, ni) ; //-------------------------------------------------------------------------- // remove duplicates from I2 //-------------------------------------------------------------------------- int64_t ni2 = 1 ; for (int64_t k = 1 ; k < ni ; k++) { if (I2 [ni2-1] != I2 [k]) { I2 [ni2++] = I2 [k] ; } } //-------------------------------------------------------------------------- // return the new sorted list //-------------------------------------------------------------------------- *p_I2 = I2 ; // I2 has size ni, but only I2 [0..ni2-1] is defined *p_ni = ni2 ; return (GrB_SUCCESS) ; }
GrB_Info GB_to_hyper            // convert a matrix to hypersparse
(
    GrB_Matrix A,               // matrix to convert to hypersparse
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    // GB_subref_numeric can return a matrix with jumbled columns, since it
    // soon be transposed (and sorted) in GB_accum_mask.  However, it passes
    // the jumbled matrix to GB_to_hyper_conform.  This function does not
    // access the row indices at all, so it works fine if the columns have
    // jumbled row indices.

    ASSERT_OK_OR_JUMBLED (GB_check (A, "A converting to hypersparse", GB0)) ;

    #ifndef NDEBUG
    GrB_Info info ;             // only needed to check GB_jappend in debug mode
    #endif

    //--------------------------------------------------------------------------
    // convert A to hypersparse form
    //--------------------------------------------------------------------------

    if (!A->is_hyper)
    {
        // A is currently standard (non-hypersparse): no hyperlist, and the
        // vector pointer array A->p spans all A->vdim vectors.
        ASSERT (A->h == NULL) ;
        ASSERT (A->nvec == A->plen && A->plen == A->vdim) ;

        //----------------------------------------------------------------------
        // count the number of non-empty vectors in A
        //----------------------------------------------------------------------

        // keep the old A->p (and whether it is shallow) so it can be read
        // while the new hyperlist is built, and freed afterwards
        int64_t *restrict Ap_old = A->p ;
        bool Ap_old_shallow = A->p_shallow ;

        int64_t n = A->vdim ;
        int64_t nvec_new = A->nvec_nonempty ;   // size of the new hyperlist

        //----------------------------------------------------------------------
        // allocate the new A->p and A->h
        //----------------------------------------------------------------------

        int64_t *restrict Ap_new ;
        int64_t *restrict Ah_new ;
        GB_MALLOC_MEMORY (Ap_new, nvec_new+1, sizeof (int64_t)) ;
        GB_MALLOC_MEMORY (Ah_new, nvec_new, sizeof (int64_t)) ;
        if (Ap_new == NULL || Ah_new == NULL)
        {
            // out of memory
            A->is_hyper = true ;    // A is hypersparse, but otherwise invalid
            GB_FREE_MEMORY (Ap_new, nvec_new+1, sizeof (int64_t)) ;
            GB_FREE_MEMORY (Ah_new, nvec_new, sizeof (int64_t)) ;
            // free all content of A; it is left as an invalid empty shell
            GB_CONTENT_FREE (A) ;
            return (GB_OUT_OF_MEMORY (GBYTES (2*nvec_new+1, sizeof (int64_t)))); 

        //----------------------------------------------------------------------
        // transplant the new A->p and A->h into the matrix
        //----------------------------------------------------------------------

        // this must be done here so that GB_jappend, just below, can be used.

        }
        A->is_hyper = true ;
        A->plen = nvec_new ;
        A->nvec = 0 ;               // GB_jappend increments this as it goes
        A->p = Ap_new ;
        A->h = Ah_new ;
        A->p_shallow = false ;
        A->h_shallow = false ;

        //----------------------------------------------------------------------
        // construct the new hyperlist in the new A->p and A->h
        //----------------------------------------------------------------------

        // scan the old vector pointers: each vector j with entries is
        // appended to the growing hyperlist via GB_jappend
        int64_t jlast, anz, anz_last ;
        GB_jstartup (A, &jlast, &anz, &anz_last) ;
        for (int64_t j = 0 ; j < n ; j++)
        {
            // anz is the cumulative entry count through vector j; GB_jappend
            // skips empty vectors (anz == anz_last) automatically
            anz = Ap_old [j+1] ;
            ASSERT (A->nvec <= A->plen) ;
            #ifndef NDEBUG
            info =
            #endif
            GB_jappend (A, j, &jlast, anz, &anz_last, Context) ;
            // cannot fail: A->plen was sized to hold all non-empty vectors
            ASSERT (info == GrB_SUCCESS) ;
            ASSERT (A->nvec <= A->plen) ;
        }
        GB_jwrapup (A, jlast, anz) ;        // finalize A->p and A->h
        ASSERT (A->nvec == nvec_new) ;
        ASSERT (A->nvec_nonempty == nvec_new) ;

        //----------------------------------------------------------------------
        // free the old A->p unless it's shallow
        //----------------------------------------------------------------------

        // this cannot use GB_ph_free because the new A->p content has already
        // been placed into A, as required by GB_jappend just above.

        if (!Ap_old_shallow)
        {
            GB_FREE_MEMORY (Ap_old, n+1, sizeof (int64_t)) ;
        }
    }

    //--------------------------------------------------------------------------
    // A is now in hypersparse form
    //--------------------------------------------------------------------------

    ASSERT_OK_OR_JUMBLED (GB_check (A, "A converted to hypersparse", GB0)) ;
    ASSERT (A->is_hyper) ;
    return (GrB_SUCCESS) ;
}