Code Example #1
File: alloc.c  Project: waterson/tinysoar
/*
 * Scan the heap until we find a block large enough to fulfill the
 * request.
 */
void *
malloc(unsigned sz)
{
    block_header_t *header, *next;

    /* Align the request. */
    sz += MIN_REQUEST - 1;
    sz &= ~(MIN_REQUEST - 1);

    /* First fit. */
    for (header = (block_header_t *) heap_start;
         (char *) header < heap_end;
         header = next) {
        next = (block_header_t *)
            ((char *) header
             + sizeof(block_header_t)
             + GET_BLOCK_SIZE(*header));

        if (GET_PREV_FREE(*next) && sz <= GET_BLOCK_SIZE(*header)) {
            /* We can fit the request in this block. */
            void *result = (void *)((char *) header + sizeof(block_header_t));

            if (GET_BLOCK_SIZE(*header) < sz + sizeof(block_header_t) + MIN_REQUEST) {
                /* We can't fit any other requests here, though. */
                CLEAR_PREV_FREE(*next);
            }
            else {
                /* Split the block. */
                struct block_footer *footer =
                    (struct block_footer *)
                    ((char *) next - sizeof(struct block_footer));

                unsigned remaining = GET_BLOCK_SIZE(*header) - sz - sizeof(block_header_t);

                SET_BLOCK_SIZE(*header, sz);

                header = (block_header_t *)
                    ((char *) header
                     + sizeof(block_header_t)
                     + sz);

                ASSERT(remaining % 2 == 0, ("odd block size"));
                INIT_BLOCK_HEADER(*header, remaining, 0);

                footer->header = header;
            }

            return result;
        }
    }

    /* Uh oh, couldn't allocate! */
    panic();
    return 0;
}
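
Note: the tinysoar snippets on this page (malloc, heap_free, free, heap_walk) all rely on a small set of boundary-tag macros whose definitions are not reproduced here. A minimal, hypothetical sketch of one plausible encoding, in which block sizes are kept even and the low bit of each header records that the previous block is free, might look like this (the project's real definitions may differ):

/*
 * Hypothetical sketch only -- not the waterson/tinysoar definitions.
 * Block sizes are assumed even, so bit 0 of a header is free to mean
 * "the block immediately before this header is free".
 */
typedef unsigned block_header_t;

struct block_footer {
    block_header_t *header;            /* back-pointer from a free block's tail to its header */
};

extern char *heap_start, *heap_end;    /* bounds of the managed heap */

#define MIN_REQUEST 4                  /* assumed allocation granularity (a power of two) */

#define GET_BLOCK_SIZE(h)            ((h) & ~1u)
#define SET_BLOCK_SIZE(h, sz)        ((h) = ((h) & 1u) | (sz))
#define INIT_BLOCK_HEADER(h, sz, pf) ((h) = (sz) | ((pf) ? 1u : 0u))
#define GET_PREV_FREE(h)             ((h) & 1u)
#define MARK_PREV_FREE(h)            ((h) |= 1u)
#define CLEAR_PREV_FREE(h)           ((h) &= ~1u)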
Code Example #2
void buddyFree(void * ptr) {
	Address blockStart = (Address)(ptr - heap) - 4*sizeof(Address);
	size_t blockSize = GET_BLOCK_SIZE(blockStart);
	Address buddyAddress = _getBuddyAddress(blockStart, blockSize);
	int buddySide = _getBuddySide(blockStart, blockSize);
	TRACE_DUMP("Deallocating %s buddy (neighbour at %lu).", 
		(buddySide == BUDDY_SIDE_LEFT ? "left" : "right"), buddyAddress);
	
	// in all cases, mark as unused
	MARK_BLOCK_FREE(blockStart);
	
	
	// while buddy is free, merge them and go one level up
	while (IS_BLOCK_FREE(buddyAddress)) {
		TRACE_DUMP("Merging...");
		// deattach the buddy
		_removeBlockFromList(buddyAddress, blockSize);
		
		// find the new block start
		blockStart = buddyAddress < blockStart ? buddyAddress : blockStart;

		blockSize++;
		// look for parent one
		if (MAX_BLOCK_SIZE == blockSize) {
			// at the very top, heap is completely free
			break;
		}
		buddyAddress = _getBuddyAddress(blockStart, blockSize);
		TRACE_DUMP("New block start is %lu, buddy at %lu.", blockStart, buddyAddress);
	}
	
	SET_BLOCK_SIZE(blockStart, blockSize);
	_addBlockToList(blockStart, blockSize);
}
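
The helpers _getBuddyAddress and _getBuddySide are project-specific and not shown above. Since blockSize is incremented by one per merge and compared against MAX_BLOCK_SIZE, it is evidently the block's order (log2 of its size); under that assumption, and assuming Address values are byte offsets from the heap base and that a BUDDY_SIDE_RIGHT constant exists alongside BUDDY_SIDE_LEFT, a minimal sketch of the usual buddy computation is:

/* Hypothetical sketches only -- the real helpers live in the project
 * this snippet was taken from. */
static Address _getBuddyAddress_sketch(Address blockStart, size_t order) {
	/* Buddies of order k differ exactly in bit k of their heap offset. */
	return blockStart ^ ((Address) 1 << order);
}

static int _getBuddySide_sketch(Address blockStart, size_t order) {
	/* If bit k of this block's offset is set, the block is the right
	 * half of its parent, so its buddy lies to the left. */
	return (blockStart & ((Address) 1 << order)) ? BUDDY_SIDE_LEFT
	                                             : BUDDY_SIDE_RIGHT;
}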
Code Example #3
File: alloc.c  Project: waterson/tinysoar
/*
 * Return the amount of free space in the heap.
 */
int
heap_free()
{
    int nfree = 0;
    block_header_t *header, *next;

    for (header = (block_header_t *) heap_start;
         (char *) header < heap_end;
         header = next) {
        next = (block_header_t *)
            ((char *) header
             + sizeof(block_header_t)
             + GET_BLOCK_SIZE(*header));

        if (GET_PREV_FREE(*next))
            nfree += GET_BLOCK_SIZE(*header);
    }

    return nfree;
}
Code Example #4
File: alloc.c  Project: waterson/tinysoar
/*
 * Free the block, coalescing with the previous and next blocks if
 * possible.
 */
void
free(void *ptr)
{
    block_header_t *header = (block_header_t *)
        ((char *) ptr - sizeof(block_header_t));

    block_header_t *next = (block_header_t *)
        ((char *) ptr + GET_BLOCK_SIZE(*header));

    block_header_t *next_next = (block_header_t *)
        ((char *) next + sizeof(block_header_t) + GET_BLOCK_SIZE(*next));

    struct block_footer *footer;
    unsigned size;

    if ((char *) next_next < heap_end && GET_PREV_FREE(*next_next)) {
        /* The block following us is free. */
        next = next_next;
    }

    if (GET_PREV_FREE(*header)) {
        /* The block prior to us is free. */
        footer = (struct block_footer *)
            ((char *) header - sizeof(struct block_footer));

        header = footer->header;
    }

    footer = (struct block_footer *)
        ((char *) next - sizeof(struct block_footer));

    footer->header = header;

    /* Expand the block to encompass the reclaimed space. */
    size = (char *) next - (char *) header - sizeof(block_header_t);
    ASSERT(size % 2 == 0, ("odd block size"));
    SET_BLOCK_SIZE(*header, size);

    /* Note in the header of the _next_ block that this block is free. */
    MARK_PREV_FREE(*next);
}
Code Example #5
File: alloc.c  Project: waterson/tinysoar
/*
 * Dump the heap to debug it.
 */
void
heap_walk()
{
    block_header_t *header, *next;

    printf("heap_begin=0x%p, heap_end=0x%p\n",
           heap_start, heap_end);

    for (header = (block_header_t *) heap_start;
         (char *) header < heap_end;
         header = next) {
        next = (block_header_t *)
            ((char *) header
             + sizeof(block_header_t)
             + GET_BLOCK_SIZE(*header));

        printf("%p heap_start+%04x size=%04x %s",
               header,
               (char *) header - heap_start,
               GET_BLOCK_SIZE(*header),
               GET_PREV_FREE(*next) ? "free" : "in use");

        if (GET_PREV_FREE(*next)) {
            struct block_footer *footer =
                (struct block_footer *)
                ((char *) next - sizeof(struct block_footer));

            if (footer->header != header) {
                printf(" BAD FOOTER, footer->header=%p heap_start+%04x",
                       footer->header,
                       (char *) footer->header - heap_start);
            }
        }

        printf("\n");
    }
}
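
Taken together, the tinysoar snippets above form a complete first-fit allocator. A hypothetical round trip (not from the project, and assuming the heap has already been initialized by startup code that these snippets do not show) could look like:

/* Hypothetical usage of the allocator shown above. */
static void allocator_demo(void)
{
    void *a = malloc(32);
    void *b = malloc(64);

    heap_walk();                 /* dumps the two in-use blocks plus the free remainder */

    free(a);                     /* records "a is free" in b's header and writes a footer */
    free(b);                     /* coalesces backwards with a's block via that footer */

    heap_walk();                 /* a and b should now appear as one free block */
    printf("free bytes: %d\n", heap_free());
}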
Code Example #6
/*
 * structure_status () shows the status of all the blocks
 */
void structure_status (char *block_ptr) {
  block_header_t *b;
  TLSF_t *ptr_TLSF;
  int end = 0, end2 = 0;
  __u32 *ptr_following;
  
  ptr_TLSF = (TLSF_t *) block_ptr; 
  if (!ptr_TLSF || ptr_TLSF -> magic_number != MAGIC_NUMBER) {
    PRINT_MSG 
      ("structure_status() error: TLSF structure is not initialized\n");
    PRINT_MSG 
      ("Hint: Execute init_memory_pool() before calling structure_status()");
    return;
  }

  PRINT_DBG_C ("\nTLSF structure address 0x");
  PRINT_DBG_H (ptr_TLSF);
  PRINT_DBG_C ("\nMax. first level index: ");
  PRINT_DBG_D (ptr_TLSF -> max_fl_index);
  PRINT_DBG_C ("\nMax. second level index: ");
  PRINT_DBG_D (ptr_TLSF -> max_sl_index);
  PRINT_DBG_C ("\n\nALL BLOCKS\n");
  
  ptr_following = ptr_TLSF -> following_non_cont_bh;
  while (!end2) {
    end = 0;
    b = (block_header_t *) (ptr_following + sizeof (__u32 *));
    
    while (!end) {
      print_block (b);
      if (IS_LAST_BLOCK(b))
        end = 1;
      else
        b = (block_header_t *) (b -> ptr.buffer +
                                TLSF_WORDS2BYTES (GET_BLOCK_SIZE(b)));
    }
    if (!(__u32 *) *ptr_following) 
      end2 = 1;
    else {
      ptr_following = (__u32 *) *ptr_following;
    }    
  }
}
Code Example #7
File: umf_mem_alloc_element.c  Project: rforge/matrix
GLOBAL Int UMF_mem_alloc_element
(
    NumericType *Numeric,
    Int nrows,
    Int ncols,
    Int **Rows,
    Int **Cols,
    Entry **C,
    Int *size,
    Element **epout
)
{

    Element *ep ;
    Unit *p ;
    Int i ;

    ASSERT (Numeric != (NumericType *) NULL) ;
    ASSERT (Numeric->Memory != (Unit *) NULL) ;

    *size = GET_ELEMENT_SIZE (nrows, ncols) ;
    if (INT_OVERFLOW (DGET_ELEMENT_SIZE (nrows, ncols) + 1))
    {
	/* :: allocate element, int overflow :: */
	return (0) ;	/* problem is too large */
    }

    i = UMF_mem_alloc_tail_block (Numeric, *size) ;
    (*size)++ ;
    if (!i)
    {
	DEBUG0 (("alloc element failed - out of memory\n")) ;
	return (0) ;	/* out of memory */
    }
    p = Numeric->Memory + i ;

    ep = (Element *) p ;

    DEBUG2 (("alloc_element done ("ID" x "ID"): p: "ID" i "ID"\n",
	nrows, ncols, (Int) (p-Numeric->Memory), i)) ;

    /* Element data structure, in order: */
    p += UNITS (Element, 1) ;		/* (1) Element header */
    *Cols = (Int *) p ;			/* (2) col [0..ncols-1] indices */
    *Rows = *Cols + ncols ;		/* (3) row [0..nrows-1] indices */
    p += UNITS (Int, ncols + nrows) ;
    *C = (Entry *) p ;			/* (4) C [0..nrows-1, 0..ncols-1] */

    ep->nrows = nrows ;		/* initialize the header information */
    ep->ncols = ncols ;
    ep->nrowsleft = nrows ;
    ep->ncolsleft = ncols ;
    ep->cdeg = 0 ;
    ep->rdeg = 0 ;
    ep->next = EMPTY ;

    DEBUG2 (("new block size: "ID" ", GET_BLOCK_SIZE (Numeric->Memory + i))) ;
    DEBUG2 (("Element size needed "ID"\n", GET_ELEMENT_SIZE (nrows, ncols))) ;

    *epout = ep ;

    /* return the offset into Numeric->Memory */
    return (i) ;
}
Code Example #8
File: fast_mblock.c  Project: Qihoo360/huststore
int fast_mblock_manager_stat_print(const bool hide_empty)
{
    int result;
    int count;
    int alloc_size;
    struct fast_mblock_info *stats;
    struct fast_mblock_info *pStat;
    struct fast_mblock_info *stat_end;

    stats = NULL;
    count = 0;
    alloc_size = 64;
    result = EOVERFLOW;
    while (result == EOVERFLOW)
    {
        alloc_size *= 2;
        stats = realloc(stats, sizeof(struct fast_mblock_info) * alloc_size);
        if (stats == NULL)
        {
            return ENOMEM;
        }
        result = fast_mblock_manager_stat(stats,
                alloc_size, &count);
    }

    if (result == 0)
    {
        int64_t alloc_mem;
        int64_t used_mem;
        int64_t amem;
        char alloc_mem_str[32];
        char used_mem_str[32];

        qsort(stats, count, sizeof(struct fast_mblock_info), fast_mblock_info_cmp);

        alloc_mem = 0;
        used_mem = 0;
        /*logInfo("%20s %12s %8s %12s %10s %10s %14s %12s %12s", "name", "element_size",
                "instance", "alloc_bytes", "trunc_alloc", "trunk_used",
                "element_alloc", "element_used", "used_ratio");*/
        stat_end = stats + count;
        for (pStat=stats; pStat<stat_end; pStat++)
        {
            if (pStat->trunk_total_count > 0)
            {
                amem = pStat->trunk_size * pStat->trunk_total_count;
                alloc_mem += amem;
                used_mem += GET_BLOCK_SIZE(*pStat) * pStat->element_used_count;
            }
            else
            {
                amem = 0;
                if (hide_empty)
                {
                    continue;
                }
            }

            /*logInfo("%20s %12d %8d %12"PRId64" %10d %10d %14d %12d %11.2f%%", pStat->name,
                    pStat->element_size, pStat->instance_count, amem,
                    pStat->trunk_total_count, pStat->trunk_used_count,
                    pStat->element_total_count, pStat->element_used_count,
                    pStat->element_total_count > 0 ? 100.00 * (double)
                    pStat->element_used_count / (double)
                    pStat->element_total_count : 0.00);*/
        }

        if (alloc_mem < 1024)
        {
            sprintf(alloc_mem_str, "%"PRId64" bytes", alloc_mem);
            sprintf(used_mem_str, "%"PRId64" bytes", used_mem);
        }
        else if (alloc_mem < 1024 * 1024)
        {
            sprintf(alloc_mem_str, "%.3f KB", (double)alloc_mem / 1024);
            sprintf(used_mem_str, "%.3f KB", (double)used_mem / 1024);
        }
        else if (alloc_mem < 1024 * 1024 * 1024)
        {
            sprintf(alloc_mem_str, "%.3f MB", (double)alloc_mem / (1024 * 1024));
            sprintf(used_mem_str, "%.3f MB", (double)used_mem / (1024 * 1024));
        }
        else
        {
            sprintf(alloc_mem_str, "%.3f GB", (double)alloc_mem / (1024 * 1024 * 1024));
            sprintf(used_mem_str, "%.3f GB", (double)used_mem / (1024 * 1024 * 1024));
        }

        /*logInfo("mblock entry count: %d, alloc memory: %s, used memory: %s, used ratio: %.2f%%",
                count, alloc_mem_str, used_mem_str,
                alloc_mem > 0 ? 100.00 * (double)used_mem / alloc_mem : 0.00);*/
    }

    if (stats != NULL) free(stats);
    return 0;
}
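
A hypothetical call site for the function above (not taken from the huststore sources); note that the logInfo calls inside it are commented out, so only the accounting path actually runs:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical caller: print allocator statistics, skipping mblocks
 * that currently own no trunks. */
static void dump_mblock_stats(void)
{
    int result = fast_mblock_manager_stat_print(true);   /* hide_empty */
    if (result != 0)
    {
        fprintf(stderr, "fast_mblock_manager_stat_print failed: %d\n", result);
    }
}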
Code Example #9
File: TLSF_malloc.c  Project: akshar100/SPaRK_x86
void *MALLOC_FUNCTION_EX (size_t size, char *block_ptr) {
    TLSF_t *ptr_TLSF;
#ifdef SANITY_CHECK
    __u32 req_size = size;
#endif
    __s32 fl, sl;
    __u32 old_size, last_block, aux_size, new_size;
    block_header_t *bh, *bh2, *bh3;

    // spark_print("Inside malloc\n");

    ptr_TLSF = (TLSF_t *) block_ptr;

#ifdef SANITY_CHECK
    checking_structure(ptr_TLSF, "Entering Malloc");
    check_range_ptr (block_ptr, "malloc 1");
#endif

    if (!ptr_TLSF || ptr_TLSF -> magic_number != MAGIC_NUMBER) {
//    PRINT_MSG ("malloc() error: TLSF structure is not initialized\n");
//    PRINT_MSG
//      ("Hint: Execute init_memory_pool() before calling malloc()");
        return NULL;
    }

    if (!size) {
//    PRINT_MSG ("malloc() error: requested size must be > 0\n");
        return NULL;
    }

    // Requested size must be translated into TLSF_WORDS
    old_size = BYTES2TLSF_WORDS(size);

    if (old_size < MIN_SIZE) {
        size = MIN_SIZE;
        fl = 0;
        sl = 0;
    } else {

        mapping_function (old_size, &fl, &sl, &size, ptr_TLSF);
#ifdef SANITY_CHECK
        check_fl_sl (fl, sl, ptr_TLSF, "malloc 1");
#endif

        if (++sl == ptr_TLSF -> max_sl_index) {
            fl ++;
            sl = 0;
        }

        /*
         * This is the source of the internal fragmentation:
         * the block handed out is larger than the requested size.
         */

        // The TLSF structure begins indexing size on MIN_LOG2_SIZE
        fl -= MIN_LOG2_SIZE;

    }

#ifdef SANITY_CHECK
    if (req_size > TLSF_WORDS2BYTES(size)) {
        SANITY_PRINTF("SANITY error: resquested %d given %d\n", req_size,
                      TLSF_WORDS2BYTES(size));
    }
    check_fl_sl_2 (fl, sl, ptr_TLSF, "malloc 2");
#endif

    /*----------------------------------------*/
    /* The search for a free block begins now */
    /*----------------------------------------*/

    /*
     * As a first try, take the first free block
     * from fl_array or its buddy.
     */

    THREAD_LOCK();

    sl = ptr_TLSF -> fl_array[fl].bitmapSL & ((~0) << sl);
    if (sl != 0) {
        sl = TLSF_fls(sl);
#ifdef SANITY_CHECK
        check_fl_sl_2 (fl, sl, ptr_TLSF, "malloc 3");
#endif
        goto found;
    }

    /*
     * As a last resort, a free block is looked up through the bitmaps.
     */
    fl = TLSF_fls(ptr_TLSF -> bitmapFL & ((~0) << (fl + 1)));

    if (fl > 0) {
        sl = TLSF_fls(ptr_TLSF -> fl_array[fl].bitmapSL);
#ifdef SANITY_CHECK
        check_fl_sl_2 (fl, sl, ptr_TLSF, "malloc 4");
#endif
        goto found;
    }

    /*
     * HUGGGG, NOT ENOUGH MEMORY
     * I think we have done all we could; I'm sorry.
     */

    THREAD_UNLOCK();
//  PRINT_MSG ("malloc() error: Memory pool exhausted!!!\n");
//  PRINT_MSG ("Hint: You can add memory through add_new_block()\n");
//  PRINT_MSG ("Hint: However this is not a real-time guaranteed way\n");

    return NULL;

    /* end of the search */
    /*------------------------------------------------------------*/


    /*
     * we can say: YESSSSSSSSSSS, we have enough memory!!!!
     */

found:
    bh = ptr_TLSF -> fl_array [fl].sl_array [sl];

#ifdef SANITY_CHECK
    check_range_bh (bh, "malloc 1");
    check_mn (bh, "malloc 1");
#endif

    ptr_TLSF -> fl_array [fl].sl_array [sl] = bh -> ptr.free_ptr.next;
#ifdef SANITY_CHECK
    bh3 = ptr_TLSF -> fl_array[fl].sl_array[sl];
    if (bh3 != NULL) {
        check_range_bh (bh3, "malloc 2");
        check_mn (bh3, "malloc 2");
    }
#endif

    if (ptr_TLSF -> fl_array [fl].sl_array [sl]) {
        ptr_TLSF -> fl_array [fl].sl_array [sl] -> ptr.free_ptr.prev = NULL;
    } else {
        TLSF__clear_bit (sl, ptr_TLSF -> fl_array[fl].bitmapSL);
        if (!ptr_TLSF -> fl_array[fl].bitmapSL)
            TLSF__clear_bit (fl, ptr_TLSF -> bitmapFL);
    }

    /* can bh be split? */

    new_size = (int)(GET_BLOCK_SIZE(bh) - size -  beg_header_overhead);
    /* The result of the subtraction may be negative... but new_size is unsigned */
    if ((int) new_size >= (int) MIN_SIZE) {
        /*
         * Yes, bh will be splitted into two blocks
         */

        /* The new block will begin at the end of the current block */
        last_block = IS_LAST_BLOCK(bh)?1:0;
        bh -> size = size;
        SET_USED_BLOCK(bh);

        bh2 = (block_header_t *) (bh -> ptr.buffer +
                                  TLSF_WORDS2BYTES
                                  (GET_BLOCK_SIZE(bh)));
#ifdef SANITY_CHECK
        bh2 -> mw = MAGIC_NUMBER;
#endif

        bh2 -> prev_phys_block = bh;
        bh2 -> size = new_size;

        if (last_block) SET_LAST_BLOCK (bh2);

        //aux_size = GET_BLOCK_SIZE(bh2);

        if (new_size < ptr_TLSF ->  TLSF_max_struct_size) {
            mapping_function (new_size, &fl, &sl, &aux_size, ptr_TLSF);
#ifdef SANITY_CHECK
            check_fl_sl (fl, sl, ptr_TLSF, "malloc 5");
#endif
        } else {
            fl =  ptr_TLSF -> max_fl_index - 1;
            sl = ptr_TLSF -> max_sl_index - 1;
        }

        fl -= MIN_LOG2_SIZE;
#ifdef SANITY_CHECK
        check_fl_sl_2 (fl, sl, ptr_TLSF, "malloc 6");
#endif
        init_and_insert_block (ptr_TLSF, bh2, fl, sl);

#ifdef SANITY_CHECK
        check_range_bh (bh2, "malloc 3");
        check_mn (bh2, "malloc 3");
#endif

        if (!last_block) {
            bh3 = (block_header_t *) (bh2 -> ptr.buffer +
                                      TLSF_WORDS2BYTES(new_size));

            bh3 -> prev_phys_block = bh2;

#ifdef SANITY_CHECK
            check_range_bh (bh3, "malloc 4");
            check_mn (bh3, "malloc 4");
#endif
        }
    }

    SET_USED_BLOCK(bh);

    THREAD_UNLOCK();

#ifdef SANITY_CHECK
    checking_structure (ptr_TLSF, "Leaving Malloc");
#endif

    // spark_print("Leaving malloc\n");
    return (void *)  bh -> ptr.buffer;
}
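
The core of the lookup above is the two-level mapping from a request size to (fl, sl). The project's mapping_function is not shown on this page; a minimal sketch of the standard TLSF mapping, assuming 32 second-level lists per first level and GCC's __builtin_clz, is:

/* Illustrative sketch only (not the mapping_function used above): the
 * first-level index is the position of the size's most significant bit,
 * and the second-level index linearly subdivides that power-of-two range. */
static void tlsf_mapping_sketch(unsigned size, int *fl, int *sl)
{
    const int SL_LOG2 = 5;                    /* assumed: 2^5 = 32 second-level lists */

    if (size < (1u << SL_LOG2)) {             /* tiny requests share first level 0 */
        *fl = 0;
        *sl = (int) size;
        return;
    }
    *fl = 31 - __builtin_clz(size);           /* floor(log2(size)) */
    *sl = (int) ((size >> (*fl - SL_LOG2)) - (1u << SL_LOG2));
}

The ++sl adjustment in the snippet then rounds the request up to the next list, so any block taken from that list is guaranteed to be at least as large as the request.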
Code Example #10
File: umf_create_element.c  Project: naskiz/deal.II
GLOBAL Int UMF_create_element
(
    NumericType *Numeric,
    WorkType *Work,
    SymbolicType *Symbolic
)
{
    /* ---------------------------------------------------------------------- */
    /* local variables */
    /* ---------------------------------------------------------------------- */

    Int j, col, row, *Fcols, *Frows, fnrows, fncols, *Cols, len, needunits, t1,
        t2, size, e, i, *E, *Fcpos, *Frpos, *Rows, eloc, fnr_curr, f,
        got_memory, *Row_tuples, *Row_degree, *Row_tlen, *Col_tuples, max_mark,
        *Col_degree, *Col_tlen, nn, n_row, n_col, r2, c2, do_Fcpos ;
    Entry *C, *Fcol ;
    Element *ep ;
    Unit *p, *Memory ;
    Tuple *tp, *tp1, *tp2, tuple, *tpend ;
#ifndef NDEBUG
    DEBUG2 (("FRONTAL WRAPUP\n")) ;
    UMF_dump_current_front (Numeric, Work, TRUE) ;
#endif

    /* ---------------------------------------------------------------------- */
    /* get parameters */
    /* ---------------------------------------------------------------------- */

    ASSERT (Work->fnpiv == 0) ;
    ASSERT (Work->fnzeros == 0) ;
    Row_degree = Numeric->Rperm ;
    Row_tuples = Numeric->Uip ;
    Row_tlen   = Numeric->Uilen ;
    Col_degree = Numeric->Cperm ;
    Col_tuples = Numeric->Lip ;
    Col_tlen   = Numeric->Lilen ;
    n_row = Work->n_row ;
    n_col = Work->n_col ;
    nn = MAX (n_row, n_col) ;
    Fcols = Work->Fcols ;
    Frows = Work->Frows ;
    Fcpos = Work->Fcpos ;
    Frpos = Work->Frpos ;
    Memory = Numeric->Memory ;
    fncols = Work->fncols ;
    fnrows = Work->fnrows ;

    tp = (Tuple *) NULL ;
    tp1 = (Tuple *) NULL ;
    tp2 = (Tuple *) NULL ;

    /* ---------------------------------------------------------------------- */
    /* add the current frontal matrix to the degrees of each column */
    /* ---------------------------------------------------------------------- */

    if (!Symbolic->fixQ)
    {
        /* but only if the column ordering is not fixed */
#pragma ivdep
        for (j = 0 ; j < fncols ; j++)
        {
            /* add the current frontal matrix to the degree */
            ASSERT (Fcols [j] >= 0 && Fcols [j] < n_col) ;
            Col_degree [Fcols [j]] += fnrows ;
        }
    }

    /* ---------------------------------------------------------------------- */
    /* add the current frontal matrix to the degrees of each row */
    /* ---------------------------------------------------------------------- */

#pragma ivdep
    for (i = 0 ; i < fnrows ; i++)
    {
        /* add the current frontal matrix to the degree */
        ASSERT (Frows [i] >= 0 && Frows [i] < n_row) ;
        Row_degree [Frows [i]] += fncols ;
    }

    /* ---------------------------------------------------------------------- */
    /* Reset the external degree counters */
    /* ---------------------------------------------------------------------- */

    E = Work->E ;
    max_mark = MAX_MARK (nn) ;

    if (!Work->pivcol_in_front)
    {
        /* clear the external column degrees. no more Usons of current front */
        Work->cdeg0 += (nn + 1) ;
        if (Work->cdeg0 >= max_mark)
        {
            /* guard against integer overflow.  This is very rare */
            DEBUG1 (("Integer overflow, cdeg\n")) ;
            Work->cdeg0 = 1 ;
#pragma ivdep
            for (e = 1 ; e <= Work->nel ; e++)
            {
                if (E [e])
                {
                    ep = (Element *) (Memory + E [e]) ;
                    ep->cdeg = 0 ;
                }
            }
        }
    }

    if (!Work->pivrow_in_front)
    {
        /* clear the external row degrees.  no more Lsons of current front */
        Work->rdeg0 += (nn + 1) ;
        if (Work->rdeg0 >= max_mark)
        {
            /* guard against integer overflow.  This is very rare */
            DEBUG1 (("Integer overflow, rdeg\n")) ;
            Work->rdeg0 = 1 ;
#pragma ivdep
            for (e = 1 ; e <= Work->nel ; e++)
            {
                if (E [e])
                {
                    ep = (Element *) (Memory + E [e]) ;
                    ep->rdeg = 0 ;
                }
            }
        }
    }

    /* ---------------------------------------------------------------------- */
    /* clear row/col offsets */
    /* ---------------------------------------------------------------------- */

    if (!Work->pivrow_in_front)
    {
#pragma ivdep
        for (j = 0 ; j < fncols ; j++)
        {
            Fcpos [Fcols [j]] = EMPTY ;
        }
    }

    if (!Work->pivcol_in_front)
    {
#pragma ivdep
        for (i = 0 ; i < fnrows ; i++)
        {
            Frpos [Frows [i]] = EMPTY ;
        }
    }

    if (fncols <= 0 || fnrows <= 0)
    {
        /* no element to create */
        DEBUG2 (("Element evaporation\n")) ;
        Work->prior_element = EMPTY ;
        return (TRUE) ;
    }

    /* ---------------------------------------------------------------------- */
    /* create element for later assembly */
    /* ---------------------------------------------------------------------- */

#ifndef NDEBUG
    UMF_allocfail = FALSE ;
    if (UMF_gprob > 0)
    {
        double rrr = ((double) (rand ( ))) / (((double) RAND_MAX) + 1) ;
        DEBUG4 (("Check random %e %e\n", rrr, UMF_gprob)) ;
        UMF_allocfail = rrr < UMF_gprob ;
        if (UMF_allocfail) DEBUGm2 (("Random garbage collection (create)\n"));
    }
#endif

    needunits = 0 ;
    got_memory = FALSE ;
    eloc = UMF_mem_alloc_element (Numeric, fnrows, fncols, &Rows, &Cols, &C,
                                  &needunits, &ep) ;

    /* if UMF_get_memory needs to be called */
    if (Work->do_grow)
    {
        /* full compaction of current frontal matrix, since UMF_grow_front will
         * be called next anyway. */
        r2 = fnrows ;
        c2 = fncols ;
        do_Fcpos = FALSE ;
    }
    else
    {
        /* partial compaction. */
        r2 = MAX (fnrows, Work->fnrows_new + 1) ;
        c2 = MAX (fncols, Work->fncols_new + 1) ;
        /* recompute Fcpos if pivot row is in the front */
        do_Fcpos = Work->pivrow_in_front ;
    }

    if (!eloc)
    {
        /* Do garbage collection, realloc, and try again. */
        /* Compact the current front if it needs to grow anyway. */
        /* Note that there are no pivot rows or columns in the current front */
        DEBUGm3 (("get_memory from umf_create_element, 1\n")) ;
        if (!UMF_get_memory (Numeric, Work, needunits, r2, c2, do_Fcpos))
        {
            /* :: out of memory in umf_create_element (1) :: */
            DEBUGm4 (("out of memory: create element (1)\n")) ;
            return (FALSE) ;	/* out of memory */
        }
        got_memory = TRUE ;
        Memory = Numeric->Memory ;
        eloc = UMF_mem_alloc_element (Numeric, fnrows, fncols, &Rows, &Cols, &C,
                                      &needunits, &ep) ;
        ASSERT (eloc >= 0) ;
        if (!eloc)
        {
            /* :: out of memory in umf_create_element (2) :: */
            DEBUGm4 (("out of memory: create element (2)\n")) ;
            return (FALSE) ;	/* out of memory */
        }
    }

    e = ++(Work->nel) ;	/* get the name of this new frontal matrix */
    Work->prior_element = e ;
    DEBUG8 (("wrapup e "ID" nel "ID"\n", e, Work->nel)) ;

    ASSERT (e > 0 && e < Work->elen) ;
    ASSERT (E [e] == 0) ;
    E [e] = eloc ;

    if (Work->pivcol_in_front)
    {
        /* the new element is a Uson of the next frontal matrix */
        ep->cdeg = Work->cdeg0 ;
    }

    if (Work->pivrow_in_front)
    {
        /* the new element is an Lson of the next frontal matrix */
        ep->rdeg = Work->rdeg0 ;
    }

    /* ---------------------------------------------------------------------- */
    /* copy frontal matrix into the new element */
    /* ---------------------------------------------------------------------- */

#pragma ivdep
    for (i = 0 ; i < fnrows ; i++)
    {
        Rows [i] = Frows [i] ;
    }
#pragma ivdep
    for (i = 0 ; i < fncols ; i++)
    {
        Cols [i] = Fcols [i] ;
    }
    Fcol = Work->Fcblock ;
    DEBUG0 (("copy front "ID" by "ID"\n", fnrows, fncols)) ;
    fnr_curr = Work->fnr_curr ;
    ASSERT (fnr_curr >= 0 && fnr_curr % 2 == 1) ;
    for (j = 0 ; j < fncols ; j++)
    {
        copy_column (fnrows, Fcol, C) ;
        Fcol += fnr_curr ;
        C += fnrows ;
    }

    DEBUG8 (("element copied\n")) ;

    /* ---------------------------------------------------------------------- */
    /* add tuples for the new element */
    /* ---------------------------------------------------------------------- */

    tuple.e = e ;

    if (got_memory)
    {

        /* ------------------------------------------------------------------ */
        /* UMF_get_memory ensures enough space exists for each new tuple */
        /* ------------------------------------------------------------------ */

        /* place (e,f) in the element list of each column */
        for (tuple.f = 0 ; tuple.f < fncols ; tuple.f++)
        {
            col = Fcols [tuple.f] ;
            ASSERT (col >= 0 && col < n_col) ;
            ASSERT (NON_PIVOTAL_COL (col)) ;
            ASSERT (Col_tuples [col]) ;
            tp = ((Tuple *) (Memory + Col_tuples [col])) + Col_tlen [col]++ ;
            *tp = tuple ;
        }

        /* ------------------------------------------------------------------ */

        /* place (e,f) in the element list of each row */
        for (tuple.f = 0 ; tuple.f < fnrows ; tuple.f++)
        {
            row = Frows [tuple.f] ;
            ASSERT (row >= 0 && row < n_row) ;
            ASSERT (NON_PIVOTAL_ROW (row)) ;
            ASSERT (Row_tuples [row]) ;
            tp = ((Tuple *) (Memory + Row_tuples [row])) + Row_tlen [row]++ ;
            *tp = tuple ;
        }

    }
    else
    {

        /* ------------------------------------------------------------------ */
        /* place (e,f) in the element list of each column */
        /* ------------------------------------------------------------------ */

        /* might not have enough space for each tuple */

        for (tuple.f = 0 ; tuple.f < fncols ; tuple.f++)
        {
            col = Fcols [tuple.f] ;
            ASSERT (col >= 0 && col < n_col) ;
            ASSERT (NON_PIVOTAL_COL (col)) ;
            t1 = Col_tuples [col] ;
            DEBUG1 (("Placing on col:"ID" , tuples at "ID"\n",
                     col, Col_tuples [col])) ;

            size = 0 ;
            len = 0 ;

            if (t1)
            {
                p = Memory + t1 ;
                tp = (Tuple *) p ;
                size = GET_BLOCK_SIZE (p) ;
                len = Col_tlen [col] ;
                tp2 = tp + len ;
            }

            needunits = UNITS (Tuple, len + 1) ;
            DEBUG1 (("len: "ID" size: "ID" needunits: "ID"\n",
                     len, size, needunits));

            if (needunits > size && t1)
            {
                /* prune the tuples */
                tp1 = tp ;
                tp2 = tp ;
                tpend = tp + len ;
                for ( ; tp < tpend ; tp++)
                {
                    e = tp->e ;
                    ASSERT (e > 0 && e <= Work->nel) ;
                    if (!E [e]) continue ;   /* element already deallocated */
                    f = tp->f ;
                    p = Memory + E [e] ;
                    ep = (Element *) p ;
                    p += UNITS (Element, 1) ;
                    Cols = (Int *) p ;
                    ;
                    if (Cols [f] == EMPTY) continue ;	/* already assembled */
                    ASSERT (col == Cols [f]) ;
                    *tp2++ = *tp ;	/* leave the tuple in the list */
                }
                len = tp2 - tp1 ;
                Col_tlen [col] = len ;
                needunits = UNITS (Tuple, len + 1) ;
            }

            if (needunits > size)
            {
                /* no room exists - reallocate elsewhere */
                DEBUG1 (("REALLOCATE Col: "ID", size "ID" to "ID"\n",
                         col, size, 2*needunits)) ;

#ifndef NDEBUG
                UMF_allocfail = FALSE ;
                if (UMF_gprob > 0)  /* a double relop, but ignore NaN case */
                {
                    double rrr = ((double) (rand ( ))) /
                                 (((double) RAND_MAX) + 1) ;
                    DEBUG1 (("Check random %e %e\n", rrr, UMF_gprob)) ;
                    UMF_allocfail = rrr < UMF_gprob ;
                    if (UMF_allocfail) DEBUGm2 (("Random gar. (col tuple)\n")) ;
                }
#endif

                needunits = MIN (2*needunits, (Int) UNITS (Tuple, nn)) ;
                t2 = UMF_mem_alloc_tail_block (Numeric, needunits) ;
                if (!t2)
                {
                    /* :: get memory in umf_create_element (1) :: */
                    /* get memory, reconstruct all tuple lists, and return */
                    /* Compact the current front if it needs to grow anyway. */
                    /* Note: no pivot rows or columns in the current front */
                    DEBUGm4 (("get_memory from umf_create_element, 1\n")) ;
                    return (UMF_get_memory (Numeric, Work, 0, r2, c2,do_Fcpos));
                }
                Col_tuples [col] = t2 ;
                tp2 = (Tuple *) (Memory + t2) ;
                if (t1)
                {
                    for (i = 0 ; i < len ; i++)
                    {
                        *tp2++ = *tp1++ ;
                    }
                    UMF_mem_free_tail_block (Numeric, t1) ;
                }
            }

            /* place the new (e,f) tuple in the element list of the column */
            Col_tlen [col]++ ;
            *tp2 = tuple ;
        }

        /* ------------------------------------------------------------------ */
        /* place (e,f) in the element list of each row */
        /* ------------------------------------------------------------------ */

        for (tuple.f = 0 ; tuple.f < fnrows ; tuple.f++)
        {
            row = Frows [tuple.f] ;
            ASSERT (row >= 0 && row < n_row) ;
            ASSERT (NON_PIVOTAL_ROW (row)) ;
            t1 = Row_tuples [row] ;
            DEBUG1 (("Placing on row:"ID" , tuples at "ID"\n",
                     row, Row_tuples [row])) ;

            size = 0 ;
            len = 0 ;
            if (t1)
            {
                p = Memory + t1 ;
                tp = (Tuple *) p ;
                size = GET_BLOCK_SIZE (p) ;
                len = Row_tlen [row] ;
                tp2 = tp + len ;
            }

            needunits = UNITS (Tuple, len + 1) ;
            DEBUG1 (("len: "ID" size: "ID" needunits: "ID"\n",
                     len, size, needunits)) ;

            if (needunits > size && t1)
            {
                /* prune the tuples */
                tp1 = tp ;
                tp2 = tp ;
                tpend = tp + len ;
                for ( ; tp < tpend ; tp++)
                {
                    e = tp->e ;
                    ASSERT (e > 0 && e <= Work->nel) ;
                    if (!E [e])
                    {
                        continue ;	/* element already deallocated */
                    }
                    f = tp->f ;
                    p = Memory + E [e] ;
                    ep = (Element *) p ;
                    p += UNITS (Element, 1) ;
                    Cols = (Int *) p ;
                    Rows = Cols + (ep->ncols) ;
                    if (Rows [f] == EMPTY) continue ;	/* already assembled */
                    ASSERT (row == Rows [f]) ;
                    *tp2++ = *tp ;	/* leave the tuple in the list */
                }
                len = tp2 - tp1 ;
                Row_tlen [row] = len ;
                needunits = UNITS (Tuple, len + 1) ;
            }

            if (needunits > size)
            {
                /* no room exists - reallocate elsewhere */
                DEBUG1 (("REALLOCATE Row: "ID", size "ID" to "ID"\n",
                         row, size, 2*needunits)) ;

#ifndef NDEBUG
                UMF_allocfail = FALSE ;
                if (UMF_gprob > 0)  /* a double relop, but ignore NaN case */
                {
                    double rrr = ((double) (rand ( ))) /
                                 (((double) RAND_MAX) + 1) ;
                    DEBUG1 (("Check random %e %e\n", rrr, UMF_gprob)) ;
                    UMF_allocfail = rrr < UMF_gprob ;
                    if (UMF_allocfail) DEBUGm2 (("Random gar. (row tuple)\n")) ;
                }
#endif

                needunits = MIN (2*needunits, (Int) UNITS (Tuple, nn)) ;
                t2 = UMF_mem_alloc_tail_block (Numeric, needunits) ;
                if (!t2)
                {
                    /* :: get memory in umf_create_element (2) :: */
                    /* get memory, reconstruct all tuple lists, and return */
                    /* Compact the current front if it needs to grow anyway. */
                    /* Note: no pivot rows or columns in the current front */
                    DEBUGm4 (("get_memory from umf_create_element, 2\n")) ;
                    return (UMF_get_memory (Numeric, Work, 0, r2, c2,do_Fcpos));
                }
                Row_tuples [row] = t2 ;
                tp2 = (Tuple *) (Memory + t2) ;
                if (t1)
                {
                    for (i = 0 ; i < len ; i++)
                    {
                        *tp2++ = *tp1++ ;
                    }
                    UMF_mem_free_tail_block (Numeric, t1) ;
                }
            }

            /* place the new (e,f) tuple in the element list of the row */
            Row_tlen [row]++ ;
            *tp2 = tuple ;
        }

    }

    /* ---------------------------------------------------------------------- */

#ifndef NDEBUG
    DEBUG1 (("Done extending\nFINAL: element row pattern: len="ID"\n", fncols));
    for (j = 0 ; j < fncols ; j++) DEBUG1 ((""ID"\n", Fcols [j])) ;
    DEBUG1 (("FINAL: element col pattern:  len="ID"\n", fnrows)) ;
    for (j = 0 ; j < fnrows ; j++) DEBUG1 ((""ID"\n", Frows [j])) ;
    for (j = 0 ; j < fncols ; j++)
    {
        col = Fcols [j] ;
        ASSERT (col >= 0 && col < n_col) ;
        UMF_dump_rowcol (1, Numeric, Work, col, !Symbolic->fixQ) ;
    }
    for (j = 0 ; j < fnrows ; j++)
    {
        row = Frows [j] ;
        ASSERT (row >= 0 && row < n_row) ;
        UMF_dump_rowcol (0, Numeric, Work, row, TRUE) ;
    }
    if (n_row < 1000 && n_col < 1000)
    {
        UMF_dump_memory (Numeric) ;
    }
    DEBUG1 (("New element, after filling with stuff: "ID"\n", e)) ;
    UMF_dump_element (Numeric, Work, e, TRUE) ;
    if (nn < 1000)
    {
        DEBUG4 (("Matrix dump, after New element: "ID"\n", e)) ;
        UMF_dump_matrix (Numeric, Work, TRUE) ;
    }
    DEBUG3 (("FRONTAL WRAPUP DONE\n")) ;
#endif

    return (TRUE) ;
}