Example #1
int yr_arena_reserve_memory(
    YR_ARENA* arena,
    size_t size)
{
  YR_ARENA_PAGE* new_page;
  size_t new_page_size;
  uint8_t* new_page_address;

  if (size > free_space(arena->current_page))
  {
    if (arena->flags & ARENA_FLAGS_FIXED_SIZE)
      return ERROR_INSUFFICIENT_MEMORY;

    // Requested space is bigger than current page's empty space,
    // let's calculate the size for a new page.

    new_page_size = arena->current_page->size * 2;

    while (new_page_size < size)
      new_page_size *= 2;

    if (arena->current_page->used == 0)
    {
      // Current page is not used at all, it can be reallocated.

      new_page_address = (uint8_t*) yr_realloc(
          arena->current_page->address,
          new_page_size);

      if (new_page_address == NULL)
        return ERROR_INSUFFICIENT_MEMORY;

      arena->current_page->address = new_page_address;
      arena->current_page->size = new_page_size;
    }
    else
    {
      new_page = _yr_arena_new_page(new_page_size);

      if (new_page == NULL)
        return ERROR_INSUFFICIENT_MEMORY;

      new_page->prev = arena->current_page;
      arena->current_page->next = new_page;
      arena->current_page = new_page;
      arena->flags &= ~ARENA_FLAGS_COALESCED;
    }
  }

  return ERROR_SUCCESS;
}
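
The free_space() helper itself is not part of this listing; for the arena it presumably reports how many bytes remain unused on a page. A minimal sketch, assuming YR_ARENA_PAGE carries the size and used fields referenced above:

// Hypothetical sketch, not the actual YARA helper: bytes still
// available on an arena page, assuming used never exceeds size.
static size_t free_space(YR_ARENA_PAGE* page)
{
  return page->size - page->used;
}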
Example #2
        ///////////////////////////////////////////////////////////////////////
        /// \brief Change the maximum size this cache can grow to
        ///
        /// \param max_size    [in] The new maximum size this cache will be
        ///             allowed to grow to.
        ///
        /// \returns    This function returns \a true if successful. It returns
        ///             \a false if the new \a max_size is smaller than the
        ///             current limit and the cache could not be shrunk to
        ///             the new maximum size.
        bool reserve(size_type max_size)
        {
            // we need to shrink the cache if the new max size is smaller than
            // the old one
            bool retval = true;
            if (max_size && max_size < max_size_ &&
                !free_space(long(max_size_ - max_size)))
            {
                retval = false;     // not able to shrink cache
            }

            max_size_ = max_size;   // change capacity in any case
            return retval;
        }
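Example #3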
  int write(const void* buffer, int length) noexcept
  {
    const char* data = (const char*) buffer;
    if (length > free_space()) {
      length = free_space();
      if (length == 0) return 0;
    }

    // check if we are going around the buffer ...
    int wrap = (end + length) - this->cap;

    if (wrap > 0) {
      memcpy(at_end(), data, length - wrap);
      memcpy(this->buffer, data + length - wrap, wrap);
    }
    else {
      memcpy(at_end(), data, length);
    }
    this->used += length;
    // make sure it wraps properly around
    this->end = (this->end + length) % capacity();
    return length;
  }
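
Here free_space() presumably reports how many bytes can still be written before the ring buffer fills. A minimal sketch, assuming the used counter and capacity() accessor seen above:

  // Hypothetical sketch: writable bytes left in the ring buffer,
  // assuming `used` tracks the number of occupied bytes.
  int free_space() const noexcept
  {
    return capacity() - this->used;
  }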
Example #4
static int leaf_is_full (void *data, unint len)
{
	leaf_s	*leaf = data;
	int	total = len + sizeof(rec_s);

	if (total <= free_space(leaf)) {
		return FALSE;
	}
	if (total > leaf->l_total) {
		return TRUE;
	}
	compact(leaf);
	return FALSE;
}
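
Neither free_space() nor compact() is shown for these leaf pages. Judging from the invariants checked in Example #5 below, l_total counts all free bytes including holes left by deleted records, while free_space() is presumably only the contiguous gap between the slot array and l_end; compact() closes the holes so that the two agree. A minimal sketch under that assumption:

/* Hypothetical sketch: contiguous free bytes in a leaf, assuming the
 * rec_s slot array grows up from the header while record data grows
 * down from l_end. Holes from deleted records are not counted, so
 * this equals l_total only right after compact(). */
static int free_space (leaf_s *leaf)
{
	return leaf->l_end - sizeof(leaf_s) - leaf->l_num * sizeof(rec_s);
}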
Example #5
void verify_leaf (tree_s *tree, leaf_s *leaf, char *where)
{
    snint	sum;
    unint	i;

    if (leaf->h_magic != LEAF) {
        eprintf("ERROR:%s leaf=%p magic=%x\n",
                where, leaf, leaf->h_magic);
    }
    if (leaf->l_total < free_space(leaf)) {
        eprintf("ERROR:%s allocation space bigger than total\n",
                where);
        dump_leaf(tree, leaf, 0);
    }
    if (free_space(leaf) < 0) {
        eprintf("ERROR:%s leaf overflow\n", where);
        dump_leaf(tree, leaf, 0);
    }
    sum = sizeof(leaf_s) + (leaf->l_num * sizeof(rec_s));
    for (i = 0; i < leaf->l_num; i++) {
        sum += leaf->l_rec[i].r_size;
    }
    if (PAGE_SIZE - sum != leaf->l_total) {
        eprintf("ERROR:%s totals %ld != %d\n", where,
                PAGE_SIZE - sum, leaf->l_total);
        dump_leaf(tree, leaf, 0);
    }
    for (i = 1; i < leaf->l_num; i++) {
        if (leaf->l_rec[i-1].r_key >= leaf->l_rec[i].r_key) {
            eprintf("ERROR:%s keys out of order %llx >= %llx\n",
                    where,
                    leaf->l_rec[i-1].r_key, leaf->l_rec[i].r_key);
            dump_leaf(tree, leaf, 0);
        }
    }

}
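Example #6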
/******************************************************************
* Function to load matrix H from file                             *
******************************************************************/
int pcheck_mat_dense::load(char* filename)
{
	ifstream fh;
	int tmp;

	fh.open(filename);

	if(fh.fail()){
		cout<<"File not found."<<endl;
		return -1;
	}

	/* free previous content */
	if(H != NULL){
		free_space();
	}

	/* read k and n */
	fh >> tmp;
	if(tmp <= 0) {
		cout<<"Wrong value of k."<<endl;
		return -1;
	}
	k = tmp;

	fh >> tmp;
	if(tmp <= 0 || tmp<=k){
		cout<<"Wrong value of n."<<endl;
		return -1;
	}
	n = tmp;

	alloc_space();

	for(int i = 0; i < n - k; i++){
		for(int j = 0; j < n; j++){
			fh >> tmp;
			if(tmp == 1) H[i][j] = true;
			else if(tmp == 0) H[i][j] = false;
			else {
				cout<<"Wrong matrix data."<<endl;
				return -1;
			}
		}
	}
	fh.close();
	return 0;

}
Example #7
void compact (leaf_s *leaf)
{
	static char	block[BLK_SIZE];
	leaf_s		*p;
FN;
	// Need a spin lock here
	p = (leaf_s *)block;
	bzero(p, BLK_SIZE);
	p->l_type = leaf->l_type;
	p->l_end  = BLK_SIZE;
	p->l_total = MAX_FREE;

	copy_recs(p, leaf, 0);
	memmove(leaf, p, BLK_SIZE);
	aver(leaf->l_total == free_space(leaf));
	bdirty(leaf);
}
Example #8
static void before_collection( young_heap_t *heap )
{
  young_data_t *data = DATA(heap);
  word *globals = data->globals;

  flush_stack( heap );
  heap->maximum = data->heapsize;
  heap->allocated = heap->maximum - free_space( heap );

  if (!data->havestats) {
    data->havestats = TRUE;
    data->los_live = 
      bytes2words(los_bytes_used( heap->collector->los, data->gen_no ));
    data->stack_live = bytes2words(globals[G_STKBOT] - globals[G_STKP]);
    data->not_used = bytes2words(globals[G_STKP] - globals[G_ETOP]);
  }
}
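
free_space(heap) is not shown; in this nursery the mutator allocates upward toward the stack, which grows down from the top, so the free region is presumably the gap between the two pointers, mirroring the not_used computation above. A minimal sketch under that assumption:

/* Hypothetical sketch: unused bytes in the young area, assuming the
 * allocation pointer (G_ETOP) grows up toward the stack pointer
 * (G_STKP), as in the not_used calculation above. */
static int free_space( young_heap_t *heap )
{
  word *globals = DATA(heap)->globals;
  return (int)(globals[ G_STKP ] - globals[ G_ETOP ]);
}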
Example #9
static void compact (leaf_s *leaf)
{
    static char	block[PAGE_SIZE];
    leaf_s		*p;
    FN;
    // Need a spin lock here
    p = (leaf_s *)block;
    memset(p, 0, PAGE_SIZE);

    p->h_node = leaf->h_node;
    p->h_magic  = leaf->h_magic;
    p->l_end    = PAGE_SIZE;
    p->l_total  = MAX_FREE;

    copy_recs(p, leaf, 0);
    memmove(leaf, p, PAGE_SIZE);
    aver(leaf->l_total == free_space(leaf));
}
Example #10
static void compact (leaf_s *leaf)
{
	static char	block[BLK_SIZE];
	leaf_s		*p;
FN;
	// Need a spin lock here
	p = (leaf_s *)block;
	memset(p, 0, BLK_SIZE);

	p->h_blknum = leaf->h_blknum;
	p->h_magic  = leaf->h_magic;
	p->l_end    = BLK_SIZE;
	p->l_total  = MAX_FREE;

	copy_recs(p, leaf, 0);
	memmove(leaf, p, BLK_SIZE);
	assert(leaf->l_total == free_space(leaf));
	tau_blog(leaf);
}
Example #11
void VirtioBlk::read (block_t blk, on_read_func func) {
  // Virtio Std. § 5.1.6.3
  auto* vbr = new request_t(blk,
    request_handler_t::make_packed(
    [this, func] (uint8_t* data) {
      if (data != nullptr)
        func(fs::construct_buffer(data, data + block_size()));
      else
        func(nullptr);
    }));

  if (free_space()) {
    shipit(vbr);
    req.kick();
  }
  else {
    jobs.push_back(vbr);
  }
}
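Example #12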
/******************************************************************
* Assignment operator                                             *
******************************************************************/
pcheck_mat_dense& pcheck_mat_dense::operator = (pcheck_mat& rhs)
{
	/* free previous content */
	if(H != NULL){
		free_space();
	}
	
	this->n = rhs.get_n();
	this->k = rhs.get_k();
	alloc_space();

	/* copy data */
	for(int i = 0; i < (n - k); i++){
		for(int j = 0; j < n; j++){
			H[i][j] = rhs.get(i,j);
		}
	}

	return *this;
}
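Example #13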
BOOL NNLayer::load_model (FILE *f) {
    assert(f != NULL);
    free_space();

    ignore_comment(f);
    if (fscanf(f, "%u %u %lf %lf\n",
               &n_input, &n_neuron, &winit_min, &winit_max) != 4)
        return FALSE;
    if (winit_min >= winit_max) return FALSE;

    if (n_input <= 0 || n_neuron <= 0)
        return FALSE;

    alloc_space();

    for (size_t i = 0; i < n_neuron; i++)
        for (size_t j = 0; j <= n_input; j++)
            if (fscanf(f, "%lf", &weight[i][j]) != 1)
                return FALSE;
    return TRUE;
}
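
alloc_space() and free_space() are presumably the allocation pair for the layer's weight matrix, with load_model() releasing the old weights before reading new dimensions. A minimal sketch, assuming weight is a heap-allocated double** with n_neuron rows of n_input + 1 columns (the extra column holding the bias, as the j <= n_input loop above suggests):

// Hypothetical sketch of the allocation pair assumed by load_model().
void NNLayer::alloc_space () {
    weight = new double*[n_neuron];
    for (size_t i = 0; i < n_neuron; i++)
        weight[i] = new double[n_input + 1];  // +1 for the bias weight
}

void NNLayer::free_space () {
    if (weight == NULL) return;               // nothing to release
    for (size_t i = 0; i < n_neuron; i++)
        delete[] weight[i];
    delete[] weight;
    weight = NULL;
}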
Example #14
int
main(int argc, char *argv[])
{
	int opt;
	int hasEnded = 0;
	int i;

	for (i=0; i < MAX_SEM; i++)
		sem_array[i] = SEM_FAILED;

	while ( !hasEnded )
	{
		printf("Escriba la opcion deseada\n1 - OPEN\t2 - WAIT\t3 - POST\t4 - CLOSE\n");
		printf(				  "5 - UNLINK\t6 - GETVALUE\t7 - TRYWAIT\t8 - INIT\n");
		printf(				  "9 - DESTROY\t10 - FREE SPACE\n");
		if ( get_num(&opt) == -1 )
		{
			hasEnded = 1;
			continue;
		}
		
		putchar('\n');
		switch (opt)
		{
			case 1: m_open(); break;
			case 2: m_wait(); break;
			case 3: m_post(); break;
			case 4: m_close(); break;
			case 5: m_unlink(); break;
			case 6: m_getvalue(); break;
			case 7: m_trywait(); break;
			case 8: m_sem_init(); break;
			case 9: m_destroy(); break;
			case 10: free_space(); break;
			default: printf("Opcion invalida\n");
		}
	}

	return 0;
}
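Example #15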
int main()
{
    int test;
    
    int terminal=-999;//this is for input text, so the loop knows when to stop

    int size=100;//Initial size
    int i=0;

    int *Data=Create(size);
    
    scanf("%d",&Data[i]);//Read in our initialize variable for our array
    //printf("i:%d and val:%d\n",i,Data[i]);
    i++;
    
    //Note: even without increasing the allocated space the array was not segfaulting
    
    while (Data[i-1]!=terminal)//This checks the previously read position
    {
        scanf("%d",&Data[i]);
        //printf("i:%d and val:%d\n",i,Data[i]);
        i++;
        if (i==size)//the array is full, so double its allocated capacity
        {
            size=size*2;
            Data=Inc_Cap(Data,size);
        }
    }
    printf("\nReading in Data Complete\n");
    size=i-1;//drop the -999 terminator so it is not sorted or searched
   
    //printf("Test_shot\n");
    
    int *lsearch_numComp_P;//pointers let the search functions report their comparison counts
    int *bsearch_numComp_P;
    
    int lsearch_numComp=0;
    int bsearch_numComp=0;
    
    lsearch_numComp_P=&lsearch_numComp;//point at the counters so the search functions can update them
    bsearch_numComp_P=&bsearch_numComp;
    
    
    int* toArray=Create(size);//heap-allocate the copy so it can be freed later
    
    arrayCopy(Data,toArray,size);//keep an unsorted copy for the linear search
    
    sort(Data,size);
    
    i=0;
   
    int target;//This is the value that needs to be found for binary and linear search
    scanf("%d",&target);
     //printf("Target Value: %d\n",target);
    while (target!=terminal)
    {
        
        //scanf("%d",&target);
        //printf("Target Value: %d\n",target);
    
        int Print_Lsearch=lsearch(toArray,size,target,lsearch_numComp_P);//store the returned index so the search runs only once
    
        if (Print_Lsearch==-1)//-1 means the target was not found
        {//checking the saved result avoids running the search twice
            printf("Value not found in Linear search\n");
        }
        else
            printf("Target Value:%d found with Linear search in Array position %d with %d comparisons\n",target,Print_Lsearch,*lsearch_numComp_P);
    
    
        int Print_Bsearch=Bsearch(Data,size,target,bsearch_numComp_P);
    
        if (Print_Bsearch==-1)
        {  //same logic as the linear-search check above
            printf("Value not found in Binary search\n");
        }
        else
            printf("Target Value:%d found with Binary search in Array position %d with %d comparisons\n",target,Print_Bsearch,*bsearch_numComp_P);
        
        lsearch_numComp=0;
        bsearch_numComp=0;
        scanf("%d",&target);
    
    }
    
    free_space(Data);
    free_space(toArray);
    
    return 0;
}
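
Create, Inc_Cap, arrayCopy, and free_space are not shown; from their use above they are presumably thin wrappers over malloc, realloc, and free. A minimal sketch under that assumption:

#include <stdlib.h>

/* Hypothetical sketches of the helpers this example relies on. */
int *Create(int size)                     /* allocate an int array */
{
    return malloc(size * sizeof(int));
}

int *Inc_Cap(int *data, int new_size)     /* grow it to new_size */
{
    return realloc(data, new_size * sizeof(int));
}

void arrayCopy(int *from, int *to, int n) /* copy n ints */
{
    for (int i = 0; i < n; i++) to[i] = from[i];
}

void free_space(int *data)                /* release the array */
{
    free(data);
}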
Example #16
        /// Try to get a connection to \a l from the cache, or reserve space for
        /// a new connection to \a l. This function may evict entries from the
        /// cache.
        ///
        /// \returns If a connection was found in the cache, its value is
        ///          assigned to \a conn and this function returns true. If a
        ///          connection was not found but space was reserved, \a conn is
        ///          set such that conn.get() == 0, and this function returns
        ///          true. If a connection could not be found and space could
        ///          not be reserved, \a conn is unmodified and this function
        ///          returns false.
        ///
        /// \note    The connection must be returned to the cache by calling
        ///          \a reclaim().
        bool get_or_reserve(key_type const& l, connection_type& conn)
        {
            mutex_type::scoped_lock lock(mtx_);

            typename cache_type::iterator const it = cache_.find(l);

            // Check if this key already exists in the cache.
            if (it != cache_.end())
            {
                // Key exists in cache.

                // Update LRU meta data.
                key_tracker_.splice(
                    key_tracker_.end()
                  , key_tracker_
                  , boost::get<2>(it->second)
                );

                // If connections to the locality are available in the cache,
                // remove the oldest one and return it.
                if (!boost::get<0>(it->second).empty())
                {
                    conn = boost::get<0>(it->second).front();
                    boost::get<0>(it->second).pop_front();

                    check_invariants();
                    return true;
                }

                // Otherwise, if we have fewer connections for this locality
                // than the maximum, try to reserve space in the cache for a new
                // connection.
                if (boost::get<1>(it->second) < max_connections_per_locality_)
                {
                    // See if we have enough space or can make space available.
                    // If we can't find or make space, give up.
                    if (!free_space())
                    {
                        check_invariants();
                        return false;
                    }

                    // Make sure the input connection shared_ptr doesn't hold
                    // anything.
                    conn.reset();

                    // Increase the per-locality and overall connection counts.
                    ++boost::get<1>(it->second);
                    ++connections_;

                    check_invariants();
                    return true;
                }

                // We've reached the maximum number of connections for this
                // locality, and none of them are checked into the cache, so
                // we have to give up.
                check_invariants();
                return false;
            }

            // Key isn't in cache.

            // See if we have enough space or can make space available.
            // If we can't find or make space, give up.
            if (!free_space())
            {
                check_invariants();
                return false;
            }

            // Update LRU meta data.
            typename key_tracker_type::iterator kt =
                key_tracker_.insert(key_tracker_.end(), l);

            cache_.insert(
                std::make_pair(l, boost::make_tuple(value_type(), 1, kt)));

            // Make sure the input connection shared_ptr doesn't hold anything.
            conn.reset();

            // Increase the overall connection counts.
            ++connections_;

            check_invariants();
            return true;
        }
Example #17
        /// Try to get a connection to \a l from the cache, or reserve space for
        /// a new connection to \a l. This function may evict entries from the
        /// cache.
        ///
        /// \returns If a connection was found in the cache, its value is
        ///          assigned to \a conn and this function returns true. If a
        ///          connection was not found but space was reserved, \a conn is
        ///          set such that conn.get() == 0, and this function returns
        ///          true. If a connection could not be found and space could
        ///          not be reserved, \a conn is unmodified and this function
        ///          returns false.
        ///          If force_insert is true, a new connection entry will be
        ///          created even if that means the cache limits will be
        ///          exceeded.
        ///
        /// \note    The connection must be returned to the cache by calling
        ///          \a reclaim().
        bool get_or_reserve(key_type const& l, connection_type& conn,
            bool force_insert = false)
        {
            std::lock_guard<mutex_type> lock(mtx_);

            typename cache_type::iterator const it = cache_.find(l);

            // Check if this key already exists in the cache.
            if (it != cache_.end())
            {
                // Key exists in cache.

                // Update LRU meta data.
                key_tracker_.splice(
                    key_tracker_.end()
                  , key_tracker_
                  , lru_reference(it->second)
                );

                // If connections to the locality are available in the cache,
                // remove the oldest one and return it.
                if (!cached_connections(it->second).empty())
                {
                    value_type& connections = cached_connections(it->second);
                    conn = connections.front();
                    connections.pop_front();

#if defined(HPX_TRACK_STATE_OF_OUTGOING_TCP_CONNECTION)
                    conn->set_state(Connection::state_reinitialized);
#endif
                    ++hits_;
                    check_invariants();
                    return true;
                }

                // Otherwise, if we have fewer connections for this locality
                // than the maximum, try to reserve space in the cache for a new
                // connection.
                if (num_existing_connections(it->second) <
                    max_num_connections(it->second) ||
                    force_insert)
                {
                    // See if we have enough space or can make space available.

                    // Note that if we don't have any space and there are no
                    // outstanding connections for this locality, we grow the
                    // cache size beyond its limit (hoping that it will be
                    // reduced in size next time some connection is handed back
                    // to the cache).

                    if (!free_space() &&
                        num_existing_connections(it->second) != 0 &&
                        !force_insert)
                    {
                        // If we can't find or make space, give up.
                        ++misses_;
                        check_invariants();
                        return false;
                    }

                    // Make sure the input connection shared_ptr doesn't hold
                    // anything.
                    conn.reset();

                    // Increase the per-locality and overall connection counts.
                    increment_connection_count(it->second);

                    // Statistics
                    ++insertions_;
                    check_invariants();
                    return true;
                }

                // We've reached the maximum number of connections for this
                // locality, and none of them are checked into the cache, so
                // we have to give up.
                ++misses_;
                check_invariants();
                return false;
            }

            // Key (locality) isn't in cache.

            // See if we have enough space or can make space available.

            // Note that we ignore the outcome of free_space() here as we have
            // to guarantee to have space for the new connection as there are
            // no connections outstanding for this locality. If free_space
            // fails we grow the cache size beyond its limit (hoping that it
            // will be reduced in size next time some connection is handed back
            // to the cache).
            free_space();

            // Update LRU meta data.
            typename key_tracker_type::iterator kt =
                key_tracker_.insert(key_tracker_.end(), l);

            cache_.insert(std::make_pair(
                l, util::make_tuple(
                    value_type(), 1, max_connections_per_locality_, kt
                ))
            );

            // Make sure the input connection shared_ptr doesn't hold anything.
            conn.reset();

            // Increase the overall connection counts.
            ++connections_;

            ++insertions_;
            check_invariants();
            return true;
        }
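Example #18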
NNLayer::~NNLayer () {
    free_space();
}
Example #19
size_t
file_write(file_handle_t *handle, uint8_t* data, size_t size)
{

    if (!(handle->write_offset % FLASH_PAGE_SIZE)) //we are hanging around at the beginning of a page.
        //We do so because we are waiting for the page to erase. Check to see if it is ready for us
    {
        uint8_t counter = 0;
        flash_read(handle->start + handle->write_offset, &counter, 1);
        if (counter != 0xFF) //still waiting!
            return 0;

        //if we get here it is because the page has since been erased, and we can proceed
        counter = (0xFF << handle->write_count);
        ++handle->write_count;
        if (handle->write_count == 9) handle->write_count = 1;
        flash_write(handle->start + handle->write_offset, &counter, PAGE_COUNTER_SIZE);
        handle->write_offset += PAGE_COUNTER_SIZE;
    }

    if (size >= 0xFF) //reject, because this value is used as a flag for unused memory!
        //Also because we can only write one byte at a time atomically, we limit all metadata writes, including the record of the number of bytes written, to one byte.
        return 0;

    if ((size + 2 + PAGE_COUNTER_SIZE) > FLASH_PAGE_SIZE) //reject, because we cannot write chunks larger than the page size
        return 0;

    if ((size + 2) > free_space(handle)) //reject if not enough available space
        return 0;

    //here we need to find where we can put this chunk. Because writes cannot span page boundaries, we need to see if there is space on the current page or not
    //first, identify where the next page boundary is
    uint32_t next_page = FLASH_PAGE_SIZE * (handle->write_offset / FLASH_PAGE_SIZE) + FLASH_PAGE_SIZE;
    //uint32_t page_free_space = next_page - start;

    if ((handle->write_offset + size + 2) > next_page) //if not enough room in current page for metadata + data
    {
        advance_write_pointer_to_next_page(handle);
        //check if we can still write from where we are
        if (!(handle->write_offset % FLASH_PAGE_SIZE)) //if we are stuck in limbo
            return 0;
    }

    //need to recheck.
    if ((size + 2) > free_space(handle)) //reject if not enough available space
        return 0;

    //First, write first bit of metadata containing the actual addresses we are attempting to write to
    flash_write(handle->start + handle->write_offset, &size, 1);

    //Now, attempt to commit the data itself
    flash_write(handle->start + handle->write_offset + 2, data, size);

    //If we reach here successfully, the data is written and valid. Mark it so in the metadata
    uint8_t flags = 0xFE;
    flash_write(handle->start + handle->write_offset + 1, &flags, 1);

    advance_write_pointer(handle);

    return size;
}
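Example #20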
void test_space_manager_4(void)
{
    container_handle_t *ct;
    object_handle_t *obj;
    uint64_t start_blk;
    int32_t ret;
    
    CU_ASSERT(ofs_create_container("sm", 1000, &ct) == 0);
    CU_ASSERT(ofs_create_object(ct, TEST_OBJID, FLAG_TABLE | CR_EXTENT | (CR_EXTENT << 4), &obj) == 0);

    // test free_space with discontinuous space
    CU_ASSERT(free_space(obj, 100, 50) == 0);  
    CU_ASSERT(free_space(obj, 80, 19) == 0);
    CU_ASSERT(free_space(obj, 151, 29) == 0);
    ret = alloc_space(obj, 80, 100, &start_blk);
    CU_ASSERT(start_blk == 80);
    CU_ASSERT(ret == 19);
    ret = alloc_space(obj, 80, 100, &start_blk);
    CU_ASSERT(start_blk == 100);
    CU_ASSERT(ret == 50);
    ret = alloc_space(obj, 80, 100, &start_blk);
    CU_ASSERT(start_blk == 151);
    CU_ASSERT(ret == 29);
    
    // test free_space with continuous space
    CU_ASSERT(free_space(obj, 100, 50) == 0);  
    CU_ASSERT(free_space(obj, 80, 20) == 0);
    CU_ASSERT(free_space(obj, 150, 30) == 0);
    ret = alloc_space(obj, 80, 100, &start_blk);
    CU_ASSERT(start_blk == 80);
    CU_ASSERT(ret == 100);
    
    // test free_space with continuous space
    CU_ASSERT(free_space(obj, 100, 50) == 0);  
    CU_ASSERT(free_space(obj, 80, 10) == 0);
    CU_ASSERT(free_space(obj, 160, 20) == 0);
    CU_ASSERT(free_space(obj, 90, 10) == 0);
    CU_ASSERT(free_space(obj, 150, 10) == 0);
    ret = alloc_space(obj, 80, 100, &start_blk);
    CU_ASSERT(start_blk == 80);
    CU_ASSERT(ret == 100);
    
    CU_ASSERT(ofs_close_object(obj) == 0);
    CU_ASSERT(ofs_close_container(ct) == 0);
}
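
Judging from the assertions, free_space(obj, start, count) here appears to return a block extent to the object's free list (merging it with adjacent extents), while alloc_space(obj, hint, count, &start_blk) hands out at most count contiguous blocks near hint and returns the length actually allocated, which is why discontinuous frees come back one extent at a time.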
Example #21
bool BTIndexPage::redistribute(BTIndexPage *pptr, BTIndexPage *parentPtr,
		AttrType key_type, int left, const void *deletedKey)
{
	// assertion: pptr and parentPtr are  pinned

	if (left) { // 'this' is the left sibling of pptr
		if (slot[-(slotCnt-1)].length + free_space() > (MAX_SPACE-DPFIXED)/2) {
			// cannot spare a record for its underflow sibling
			return false;
		}
		else {
			// get its sibling's first record's key
			Status st;
			RID dummyRid;
			PageId dummyPageId;
			Keytype oldKey;
			pptr->get_first(dummyRid, (void*)&oldKey, dummyPageId);

			// get the entry pointing to the right sibling
			Keytype entry;
			st = parentPtr->findKey((void*)&oldKey, (void*)&entry, key_type);
			assert(st == OK);

			// get the leftmost child pointer of the right sibling
			PageId leftMostPageId = pptr->getLeftLink();

			// insert  <entry,leftMostPageId>  to its sibling
			st = pptr->insertKey((void*)&entry, key_type,
					leftMostPageId, dummyRid);
			if (st != OK)
				return false;

			// get the last record of itself
			PageId lastPageId;
			Keytype lastKey;
			get_key_data(&lastKey, (Datatype*)&lastPageId,
					(KeyDataEntry*)(data+slot[-(slotCnt-1)].offset),
					slot[-(slotCnt-1)].length, (nodetype)type);

			// set sibling's leftmostchild to be lastPageId
			pptr->setLeftLink(lastPageId);

			// delete the last record from the old page
			RID delRid;
			delRid.pageNo = page_no();
			delRid.slotNo = slotCnt-1;
			st = deleteRecord(delRid);
			assert(st == OK);

			// adjust the entry pointing to sibling in its parent
			if (deletedKey)
				st = parentPtr->adjust_key((void*)&lastKey, deletedKey,
						key_type);
			else
				st = parentPtr->adjust_key((void*)&lastKey,
						(void*)&oldKey, key_type);
			assert (st == OK);
		}
	}
	else { // 'this' is the right sibling of pptr
		if (slot[0].length + free_space() > (MAX_SPACE-DPFIXED)/2) {
			// cannot spare a record for its underflow sibling
			return false;
		}
		else {
			// get the first record
			Status st;
			PageId firstPageId;
			Keytype firstKey;
			get_key_data(&firstKey, (Datatype*)&firstPageId,
					(KeyDataEntry*)(data+slot[0].offset),
					slot[0].length, (nodetype)type);

			// get its leftmost child pointer
			PageId leftMostPageId = getLeftLink();

			// get the entry in its parent pointing to itself
			Keytype entry;
			st = parentPtr->findKey((void*)&firstKey, (void*)&entry, key_type);
			assert(st == OK);

			// insert <entry, leftMostPageId> to its left sibling
			RID dummyRid;
			st = pptr->insertKey((void*)&entry, key_type,
					leftMostPageId, dummyRid);
			if (st != OK)
				return false;

			// set its new leftmostchild
			setLeftLink(firstPageId);

			// delete the first record
			RID delRid;
			delRid.pageNo = page_no();
			delRid.slotNo = 0;
			st = deleteRecord(delRid);
			assert(st == OK);

			// adjust the entry pointing to itself in its parent
			st = parentPtr->adjust_key((void*)&firstKey,
					(void*)&entry, key_type);
			assert(st == OK);
		}
	}

	return true;
}
Example #22
/* This is the main entrypoint into the slimmed-down software tnl
 * module.  In a regular swtnl driver, this can be plugged straight
 * into the vbo->Driver.DrawPrims() callback.
 */
void _tnl_draw_prims( struct gl_context *ctx,
		      const struct gl_client_array *arrays[],
		      const struct _mesa_prim *prim,
		      GLuint nr_prims,
		      const struct _mesa_index_buffer *ib,
		      GLuint min_index,
		      GLuint max_index)
{
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   const GLuint TEST_SPLIT = 0;
   const GLint max = TEST_SPLIT ? 8 : tnl->vb.Size - MAX_CLIPPED_VERTICES;
   GLint max_basevertex = prim->basevertex;
   GLuint i;

   /* Mesa core state should have been validated already */
   assert(ctx->NewState == 0x0);

   if (!_mesa_check_conditional_render(ctx))
      return; /* don't draw */

   for (i = 1; i < nr_prims; i++)
      max_basevertex = MAX2(max_basevertex, prim[i].basevertex);

   if (0)
   {
      printf("%s %d..%d\n", __FUNCTION__, min_index, max_index);
      for (i = 0; i < nr_prims; i++)
	 printf("prim %d: %s start %d count %d\n", i, 
		_mesa_lookup_enum_by_nr(prim[i].mode),
		prim[i].start,
		prim[i].count);
   }

   if (min_index) {
      /* We always translate away calls with min_index != 0. 
       */
      vbo_rebase_prims( ctx, arrays, prim, nr_prims, ib, 
			min_index, max_index,
			_tnl_vbo_draw_prims );
      return;
   }
   else if ((GLint)max_index + max_basevertex > max) {
      /* The software TNL pipeline has a fixed amount of storage for
       * vertices and it is necessary to split incoming drawing commands
       * if they exceed that limit.
       */
      struct split_limits limits;
      limits.max_verts = max;
      limits.max_vb_size = ~0;
      limits.max_indices = ~0;

      /* This will split the buffers one way or another and
       * recursively call back into this function.
       */
      vbo_split_prims( ctx, arrays, prim, nr_prims, ib, 
		       0, max_index + prim->basevertex,
		       _tnl_vbo_draw_prims,
		       &limits );
   }
   else {
      /* May need to map a vertex buffer object for every attribute plus
       * one for the index buffer.
       */
      struct gl_buffer_object *bo[VERT_ATTRIB_MAX + 1];
      GLuint nr_bo = 0;
      GLuint inst;

      for (i = 0; i < nr_prims;) {
	 GLuint this_nr_prims;

	 /* Our SW TNL pipeline doesn't handle basevertex yet, so bind_indices
	  * will rebase the elements to the basevertex, and we'll only
	  * emit strings of prims with the same basevertex in one draw call.
	  */
	 for (this_nr_prims = 1; i + this_nr_prims < nr_prims;
	      this_nr_prims++) {
	    if (prim[i].basevertex != prim[i + this_nr_prims].basevertex)
	       break;
	 }

         assert(prim[i].num_instances > 0);

	 /* Binding inputs may imply mapping some vertex buffer objects.
	  * They will need to be unmapped below.
	  */
         for (inst = 0; inst < prim[i].num_instances; inst++) {

            bind_prims(ctx, &prim[i], this_nr_prims);
            bind_inputs(ctx, arrays, max_index + prim[i].basevertex + 1,
                        bo, &nr_bo);
            bind_indices(ctx, ib, bo, &nr_bo);

            tnl->CurInstance = inst;
            TNL_CONTEXT(ctx)->Driver.RunPipeline(ctx);

            unmap_vbos(ctx, bo, nr_bo);
            free_space(ctx);
         }

	 i += this_nr_prims;
      }
   }
}
Example #23
/* This is the main entrypoint into the slimmed-down software tnl
 * module.  In a regular swtnl driver, this can be plugged straight
 * into the vbo->Driver.DrawPrims() callback.
 */
void _tnl_draw_prims( GLcontext *ctx,
		      const struct gl_client_array *arrays[],
		      const struct _mesa_prim *prim,
		      GLuint nr_prims,
		      const struct _mesa_index_buffer *ib,
		      GLuint min_index,
		      GLuint max_index)
{
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   const GLuint TEST_SPLIT = 0;
   const GLint max = TEST_SPLIT ? 8 : tnl->vb.Size - MAX_CLIPPED_VERTICES;

   if (0)
   {
      GLuint i;
      _mesa_printf("%s %d..%d\n", __FUNCTION__, min_index, max_index);
      for (i = 0; i < nr_prims; i++)
	 _mesa_printf("prim %d: %s start %d count %d\n", i, 
		      _mesa_lookup_enum_by_nr(prim[i].mode),
		      prim[i].start,
		      prim[i].count);
   }

   if (min_index) {
      /* We always translate away calls with min_index != 0. 
       */
      vbo_rebase_prims( ctx, arrays, prim, nr_prims, ib, 
			min_index, max_index,
			_tnl_draw_prims );
      return;
   }
   else if (max_index >= max) {
      /* The software TNL pipeline has a fixed amount of storage for
       * vertices and it is necessary to split incoming drawing commands
       * if they exceed that limit.
       */
      struct split_limits limits;
      limits.max_verts = max;
      limits.max_vb_size = ~0;
      limits.max_indices = ~0;

      /* This will split the buffers one way or another and
       * recursively call back into this function.
       */
      vbo_split_prims( ctx, arrays, prim, nr_prims, ib, 
		       0, max_index,
		       _tnl_draw_prims,
		       &limits );
   }
   else {
      /* May need to map a vertex buffer object for every attribute plus
       * one for the index buffer.
       */
      struct gl_buffer_object *bo[VERT_ATTRIB_MAX + 1];
      GLuint nr_bo = 0;

      /* Binding inputs may imply mapping some vertex buffer objects.
       * They will need to be unmapped below.
       */
      bind_inputs(ctx, arrays, max_index+1, bo, &nr_bo);
      bind_indices(ctx, ib, bo, &nr_bo);
      bind_prims(ctx, prim, nr_prims );

      TNL_CONTEXT(ctx)->Driver.RunPipeline(ctx);

      unmap_vbos(ctx, bo, nr_bo);
      free_space(ctx);
   }
}
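Example #24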
int reduced_covers (FILE *descendant_file, FILE *covers_file, int k, int ***auts, struct pga_vars *pga, struct pcp_vars *pcp)
{
   int lower_step, upper_step;
   int nmr_of_covers = 0;
   int *a, *b;                  /* arrays needed for orbit calculation */
   char *c;                     /* array needed for stabiliser calculation */
   int **perms;                 /* store all permutations */
   int *orbit_length;           /* length of orbits */
   FILE * LINK_input;        /* input file for GAP */
#if defined (GAP_LINK) 
   Logical process_fork = FALSE; /* has GAP process forked? */        
#endif
   Logical soluble_group;       /* indicates that orbits and stabilisers may 
				   be computed using soluble machinery */

   /* calculate the extended automorphisms */
   extend_automorphisms (auts, pga->m, pcp);
   if (pcp->overflow) return 0;

   if (pga->print_extensions && pga->m != 0) {  
      printf ("\nThe extension%s:\n", pga->m == 1 ? " is" : "s are");
      print_auts (pga->m, pcp->lastg, auts, pcp);
   }

   /* find range of permitted step sizes */
   step_range (k, &lower_step, &upper_step, auts, pga, pcp);

   /* set up space for definition sets */
   store_definition_sets (pga->r, lower_step, upper_step, pga);

   /* loop over each permitted step size */ 
   for (pga->s = lower_step; pga->s <= upper_step; ++pga->s) {

      if (pga->trace)
	 trace_details (pga);

      get_definition_sets (pga);
      compute_degree (pga);

      /* establish which automorphisms induce the identity 
	 on the relevant subgroup of the p-multiplicator */
      strip_identities (auts, pga, pcp);

      /* if possible, use the more efficient soluble code --
	 in particular, certain extreme cases can be handled */
      soluble_group = (pga->soluble || pga->Degree == 1 || 
		       pga->nmr_of_perms == 0);

      if (!soluble_group) {
#if defined (GAP_LINK) 
	 if (!process_fork) {
	    start_GAP_file (auts, pga);
	    process_fork = TRUE;
	 }
	 StartGapFile (pga);
#else
#if defined (GAP_LINK_VIA_FILE) 
	 start_GAP_file (&LINK_input, auts, pga, pcp);
#endif
#endif
      }

      perms = permute_subgroups (LINK_input, &a, &b, &c, auts, pga, pcp);

      if (!pga->space_efficient) {
	 if (soluble_group)
	    compute_orbits (&a, &b, &c, perms, pga);
	 else
	    insoluble_compute_orbits (&a, &b, &c, perms, pga);
      }
      orbit_length = find_orbit_reps (a, b, pga);

      if (pga->print_orbit_summary)
	 orbit_summary (orbit_length, pga);

      if (soluble_group && pga->print_orbit_arrays)
	 print_orbit_information (a, b, c, pga);

      pga->final_stage = (pga->q == pga->multiplicator_rank);

      if (!soluble_group) {
#if defined (GAP_LINK_VIA_FILE) 
	 CloseFile (LINK_input);
#endif 
      }

      setup_reps (pga->rep, pga->nmr_orbits, orbit_length, perms, a, b, c, 
		  auts, descendant_file, covers_file, pga, pcp);

      if (!pga->final_stage)
	 nmr_of_covers += pga->nmr_orbits;

      free_space (soluble_group, perms, orbit_length, 
		  a, b, c, pga); 
   }     

#if defined (GAP_LINK)
   if (process_fork) 
      QuitGap ();
#endif 

   free_vector (pga->list, 0);
   free_vector (pga->available, 0);
   free_vector (pga->offset, 0);

   return nmr_of_covers;
}
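Example #25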
// allocate all space by one/two allocation action
void test_space_manager_2(void)
{
    container_handle_t *ct;
    object_handle_t *obj;
    uint64_t start_blk;
    int32_t ret;
    
    CU_ASSERT(ofs_create_container("sm", 1000, &ct) == 0);
    CU_ASSERT(ofs_create_object(ct, TEST_OBJID, FLAG_TABLE | CR_EXTENT | (CR_EXTENT << 4), &obj) == 0);

    CU_ASSERT(free_space(obj, 100, 50) == 0);
    ret = alloc_space(obj, 10, 80, &start_blk); // will alloc 100, 50
    CU_ASSERT(start_blk == 100);
    CU_ASSERT(ret == 50);
    ret = alloc_space(obj, 10, 80, &start_blk); // no space now
    CU_ASSERT(ret == -INDEX_ERR_NO_FREE_BLOCKS);

    CU_ASSERT(free_space(obj, 100, 50) == 0);
    ret = alloc_space(obj, 10, 90, &start_blk); // will alloc 100, 50
    CU_ASSERT(start_blk == 100);
    CU_ASSERT(ret == 50);
    ret = alloc_space(obj, 10, 90, &start_blk); // no space now
    CU_ASSERT(ret == -INDEX_ERR_NO_FREE_BLOCKS);

    CU_ASSERT(free_space(obj, 100, 50) == 0);
    ret = alloc_space(obj, 10, 100, &start_blk); // will alloc 100, 50
    CU_ASSERT(start_blk == 100);
    CU_ASSERT(ret == 50);
    ret = alloc_space(obj, 10, 100, &start_blk); // no space now
    CU_ASSERT(ret == -INDEX_ERR_NO_FREE_BLOCKS);

    CU_ASSERT(free_space(obj, 100, 50) == 0);
    ret = alloc_space(obj, 10, 200, &start_blk); // will alloc 100, 50
    CU_ASSERT(start_blk == 100);
    CU_ASSERT(ret == 50);
    ret = alloc_space(obj, 10, 200, &start_blk); // no space now
    CU_ASSERT(ret == -INDEX_ERR_NO_FREE_BLOCKS);

    CU_ASSERT(free_space(obj, 100, 50) == 0);
    ret = alloc_space(obj, 100, 50, &start_blk); // will alloc 100, 50
    CU_ASSERT(start_blk == 100);
    CU_ASSERT(ret == 50);
    ret = alloc_space(obj, 100, 50, &start_blk); // no space now
    CU_ASSERT(ret == -INDEX_ERR_NO_FREE_BLOCKS);
    
    CU_ASSERT(free_space(obj, 100, 50) == 0);
    ret = alloc_space(obj, 100, 60, &start_blk); // will alloc 100, 50
    CU_ASSERT(start_blk == 100);
    CU_ASSERT(ret == 50);
    ret = alloc_space(obj, 100, 60, &start_blk); // no space now
    CU_ASSERT(ret == -INDEX_ERR_NO_FREE_BLOCKS);
    
    CU_ASSERT(free_space(obj, 100, 50) == 0);
    ret = alloc_space(obj, 120, 60, &start_blk); // will alloc 120, 30
    CU_ASSERT(start_blk == 120);
    CU_ASSERT(ret == 30);
    ret = alloc_space(obj, 120, 60, &start_blk); // will alloc 100, 20
    CU_ASSERT(start_blk == 100);
    CU_ASSERT(ret == 20);
    ret = alloc_space(obj, 120, 60, &start_blk); // no space now
    CU_ASSERT(ret == -INDEX_ERR_NO_FREE_BLOCKS);
    
    CU_ASSERT(free_space(obj, 100, 50) == 0);
    ret = alloc_space(obj, 150, 60, &start_blk); // will alloc 100, 50
    CU_ASSERT(start_blk == 100);
    CU_ASSERT(ret == 50);
    ret = alloc_space(obj, 150, 60, &start_blk); // no space now
    CU_ASSERT(ret == -INDEX_ERR_NO_FREE_BLOCKS);
    
    CU_ASSERT(free_space(obj, 100, 50) == 0);
    ret = alloc_space(obj, 160, 60, &start_blk); // will alloc 100, 50
    CU_ASSERT(start_blk == 100);
    CU_ASSERT(ret == 50);
    ret = alloc_space(obj, 160, 60, &start_blk); // no space now
    CU_ASSERT(ret == -INDEX_ERR_NO_FREE_BLOCKS);
    
    CU_ASSERT(ofs_close_object(obj) == 0);
    CU_ASSERT(ofs_close_container(ct) == 0);

    CU_ASSERT(ofs_open_container("sm", &ct) == 0);
    CU_ASSERT(ofs_close_container(ct) == 0);
}
Example #26
static void collect_if_no_room( young_heap_t *heap, int room )
{
  room = roundup_balign( room );
  if (free_space( heap ) < room)
    gc_collect( heap->collector, 0, room, GCTYPE_EVACUATE );
}
Example #27
/**
 * Perform FILEINFO processing specific to a regular file
 */
void handle_fileinfo_regular(struct group_list_t *group)
{
    // First handle restart or sync mode,
    // then create/open the file.
    if (group->restartinfo) {
        if (handle_fileinfo_restart(group)) {
            return;
        }
    } else if (group->sync_mode) {
        if (handle_fileinfo_sync(group)) {
            return;
        }
    }
    if (group->fileinfo.restart) {
        group->fileinfo.fd = open(group->fileinfo.filepath, OPENWRITE);
    } else {
        const char *filename;
        if (tempfile) {
            filename = group->fileinfo.temppath;
        } else {
            filename = group->fileinfo.filepath;
        }
#ifdef WINDOWS
        SetFileAttributes(filename, FILE_ATTRIBUTE_NORMAL);
#else
        chmod(filename, 0644);
#endif
        group->fileinfo.fd = open(filename, OPENWRITE | O_CREAT | O_TRUNC,0644);
    }
    if (group->fileinfo.fd == -1) {
        gsyserror(group, "Error opening data file");
        early_complete(group, COMP_STAT_REJECTED, 0);
        return;
    }
    if (group->fileinfo.size > free_space(group->fileinfo.filepath)) {
        glog0(group, "Not enough disk space, aborting");
        send_abort(group, "Not enough disk space");
        return;
    }

    // Final preparations for receiving a file.
    if (group->fileinfo.restart) {
        group->fileinfo.naklist = group->restartinfo->naklist;
        group->fileinfo.section_done = group->restartinfo->section_done;
        group->restartinfo->naklist = NULL;
        group->restartinfo->section_done = NULL;
        free(group->restartinfo);
        group->restartinfo = NULL;
    } else {
        group->fileinfo.naklist = safe_calloc(group->fileinfo.blocks, 1);
        group->fileinfo.section_done = safe_calloc(group->fileinfo.sections, 1);
        memset(group->fileinfo.naklist, 1, group->fileinfo.blocks);
    }
    group->fileinfo.last_block = -1;
    group->fileinfo.last_section = 0;
    group->fileinfo.curr_offset = 0;
    group->fileinfo.cache_start = 0;
    group->fileinfo.cache_end = 0;
    group->fileinfo.cache_len = 0;
    group->fileinfo.cache = safe_calloc(cache_len, 1);
    group->fileinfo.cache_status = safe_calloc(cache_len / group->blocksize, 1);
    group->phase = PHASE_RECEIVING;
    send_fileinfo_ack(group, group->fileinfo.restart);
    set_timeout(group, 0);
}
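Example #28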
/******************************************************************
* Destructor                                                      *
******************************************************************/
pcheck_mat_dense::~pcheck_mat_dense(void)
{
	/* free memory for matrix H */
	free_space();
	H = NULL;
}
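Example #29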
void PSAdaptiveSizePolicy::compute_generation_free_space(
                                           size_t young_live,
                                           size_t eden_live,
                                           size_t old_live,
                                           size_t perm_live,
                                           size_t cur_eden,
                                           size_t max_old_gen_size,
                                           size_t max_eden_size,
                                           bool   is_full_gc,
                                           GCCause::Cause gc_cause,
                                           CollectorPolicy* collector_policy) {

  // Update statistics
  // Time statistics are updated as we go, update footprint stats here
  _avg_base_footprint->sample(BaseFootPrintEstimate + perm_live);
  avg_young_live()->sample(young_live);
  avg_eden_live()->sample(eden_live);
  if (is_full_gc) {
    // old_live is only accurate after a full gc
    avg_old_live()->sample(old_live);
  }

  // This code used to return if the policy was not ready, i.e.,
  // policy_is_ready() returning false.  The intent was that
  // decisions below needed major collection times and so could
  // not be made before two major collections.  A consequence was
  // adjustments to the young generation were not done until after
  // two major collections even if the minor collections times
  // exceeded the requested goals.  Now let the young generation
  // adjust for the minor collection times.  Major collection times
  // will be zero for the first collection and will naturally be
  // ignored.  Tenured generation adjustments are only made at the
  // full collections so until the second major collection has
  // been reached, no tenured generation adjustments will be made.

  // Until we know better, desired promotion size uses the last calculation
  size_t desired_promo_size = _promo_size;

  // Start eden at the current value.  The desired value that is stored
  // in _eden_size is not bounded by constraints of the heap and can
  // run away.
  //
  // As expected setting desired_eden_size to the current
  // value of desired_eden_size as a starting point
  // caused desired_eden_size to grow way too large and caused
  // an overflow downstream.  It may have improved performance in
  // some cases but is dangerous.
  size_t desired_eden_size = cur_eden;

#ifdef ASSERT
  size_t original_promo_size = desired_promo_size;
  size_t original_eden_size = desired_eden_size;
#endif

  // Cache some values. There's a bit of work getting these, so
  // we might save a little time.
  const double major_cost = major_gc_cost();
  const double minor_cost = minor_gc_cost();

  // Used for diagnostics
  clear_generation_free_space_flags();

  // Limits on our growth
  size_t promo_limit = (size_t)(max_old_gen_size - avg_old_live()->average());

  // This method sets the desired eden size.  That plus the
  // desired survivor space sizes sets the desired young generation
  // size.  This methods does not know what the desired survivor
  // size is but expects that other policy will attempt to make
  // the survivor sizes compatible with the live data in the
  // young generation.  This limit is an estimate of the space left
  // in the young generation after the survivor spaces have been
  // subtracted out.
  size_t eden_limit = max_eden_size;

  // But don't force a promo size below the current promo size. Otherwise,
  // the promo size will shrink for no good reason.
  promo_limit = MAX2(promo_limit, _promo_size);

  const double gc_cost_limit = GCTimeLimit/100.0;

  // Which way should we go?
  // if pause requirement is not met
  //   adjust size of any generation with average pause exceeding
  //   the pause limit.  Adjust one pause at a time (the larger)
  //   and only make adjustments for the major pause at full collections.
  // else if throughput requirement not met
  //   adjust the size of the generation with larger gc time.  Only
  //   adjust one generation at a time.
  // else
  //   adjust down the total heap size.  Adjust down the larger of the
  //   generations.

  // Add some checks for a threshold for a change.  For example,
  // a change less than the necessary alignment is probably not worth
  // attempting.


  if ((_avg_minor_pause->padded_average() > gc_pause_goal_sec()) ||
      (_avg_major_pause->padded_average() > gc_pause_goal_sec())) {
    //
    // Check pauses
    //
    // Make changes only to affect one of the pauses (the larger)
    // at a time.
    adjust_for_pause_time(is_full_gc, &desired_promo_size, &desired_eden_size);

  } else if (_avg_minor_pause->padded_average() > gc_minor_pause_goal_sec()) {
    // Adjust only for the minor pause time goal
    adjust_for_minor_pause_time(is_full_gc, &desired_promo_size, &desired_eden_size);

  } else if (adjusted_mutator_cost() < _throughput_goal) {
    // This branch used to require that (mutator_cost() > 0.0 in 1.4.2.
    // This sometimes resulted in skipping to the minimize footprint
    // code.  Change this to try and reduce GC time if mutator time is
    // negative for whatever reason.  Or for future consideration,
    // bail out of the code if mutator time is negative.
    //
    // Throughput
    //
    assert(major_cost >= 0.0, "major cost is < 0.0");
    assert(minor_cost >= 0.0, "minor cost is < 0.0");
    // Try to reduce the GC times.
    adjust_for_throughput(is_full_gc, &desired_promo_size, &desired_eden_size);

  } else {

    // Be conservative about reducing the footprint.
    //   Do a minimum number of major collections first.
    //   Have reasonable averages for major and minor collections costs.
    if (UseAdaptiveSizePolicyFootprintGoal &&
        young_gen_policy_is_ready() &&
        avg_major_gc_cost()->average() >= 0.0 &&
        avg_minor_gc_cost()->average() >= 0.0) {
      size_t desired_sum = desired_eden_size + desired_promo_size;
      desired_eden_size = adjust_eden_for_footprint(desired_eden_size,
                                                    desired_sum);
      if (is_full_gc) {
        set_decide_at_full_gc(decide_at_full_gc_true);
        desired_promo_size = adjust_promo_for_footprint(desired_promo_size,
                                                        desired_sum);
      }
    }
  }

  // Note we make the same tests as in the code block below;  the code
  // seems a little easier to read with the printing in another block.
  if (PrintAdaptiveSizePolicy) {
    if (desired_promo_size > promo_limit)  {
      // "free_in_old_gen" was the original value for used for promo_limit
      size_t free_in_old_gen = (size_t)(max_old_gen_size - avg_old_live()->average());
      gclog_or_tty->print_cr(
            "PSAdaptiveSizePolicy::compute_generation_free_space limits:"
            " desired_promo_size: " SIZE_FORMAT
            " promo_limit: " SIZE_FORMAT
            " free_in_old_gen: " SIZE_FORMAT
            " max_old_gen_size: " SIZE_FORMAT
            " avg_old_live: " SIZE_FORMAT,
            desired_promo_size, promo_limit, free_in_old_gen,
            max_old_gen_size, (size_t) avg_old_live()->average());
    }
    if (desired_eden_size > eden_limit) {
      gclog_or_tty->print_cr(
            "AdaptiveSizePolicy::compute_generation_free_space limits:"
            " desired_eden_size: " SIZE_FORMAT
            " old_eden_size: " SIZE_FORMAT
            " eden_limit: " SIZE_FORMAT
            " cur_eden: " SIZE_FORMAT
            " max_eden_size: " SIZE_FORMAT
            " avg_young_live: " SIZE_FORMAT,
            desired_eden_size, _eden_size, eden_limit, cur_eden,
            max_eden_size, (size_t)avg_young_live()->average());
    }
    if (gc_cost() > gc_cost_limit) {
      gclog_or_tty->print_cr(
            "AdaptiveSizePolicy::compute_generation_free_space: gc time limit"
            " gc_cost: %f "
            " GCTimeLimit: %d",
            gc_cost(), GCTimeLimit);
    }
  }

  // Align everything and make a final limit check
  const size_t alignment = _intra_generation_alignment;
  desired_eden_size  = align_size_up(desired_eden_size, alignment);
  desired_eden_size  = MAX2(desired_eden_size, alignment);
  desired_promo_size = align_size_up(desired_promo_size, alignment);
  desired_promo_size = MAX2(desired_promo_size, alignment);

  eden_limit  = align_size_down(eden_limit, alignment);
  promo_limit = align_size_down(promo_limit, alignment);

  // Is too much time being spent in GC?
  //   Is the heap trying to grow beyond its limits?

  const size_t free_in_old_gen =
    (size_t)(max_old_gen_size - avg_old_live()->average());
  if (desired_promo_size > free_in_old_gen && desired_eden_size > eden_limit) {
    check_gc_overhead_limit(young_live,
                            eden_live,
                            max_old_gen_size,
                            max_eden_size,
                            is_full_gc,
                            gc_cause,
                            collector_policy);
  }


  // And one last limit check, now that we've aligned things.
  if (desired_eden_size > eden_limit) {
    // If the policy says to get a larger eden but
    // is hitting the limit, don't decrease eden.
    // This can lead to a general drifting down of the
    // eden size.  Let the tenuring calculation push more
    // into the old gen.
    desired_eden_size = MAX2(eden_limit, cur_eden);
  }
  desired_promo_size = MIN2(desired_promo_size, promo_limit);


  if (PrintAdaptiveSizePolicy) {
    // Timing stats
    gclog_or_tty->print(
               "PSAdaptiveSizePolicy::compute_generation_free_space: costs"
               " minor_time: %f"
               " major_cost: %f"
               " mutator_cost: %f"
               " throughput_goal: %f",
               minor_gc_cost(), major_gc_cost(), mutator_cost(),
               _throughput_goal);

    // We give more details if Verbose is set
    if (Verbose) {
      gclog_or_tty->print( " minor_pause: %f"
                  " major_pause: %f"
                  " minor_interval: %f"
                  " major_interval: %f"
                  " pause_goal: %f",
                  _avg_minor_pause->padded_average(),
                  _avg_major_pause->padded_average(),
                  _avg_minor_interval->average(),
                  _avg_major_interval->average(),
                  gc_pause_goal_sec());
    }

    // Footprint stats
    gclog_or_tty->print( " live_space: " SIZE_FORMAT
                " free_space: " SIZE_FORMAT,
                live_space(), free_space());
    // More detail
    if (Verbose) {
      gclog_or_tty->print( " base_footprint: " SIZE_FORMAT
                  " avg_young_live: " SIZE_FORMAT
                  " avg_old_live: " SIZE_FORMAT,
                  (size_t)_avg_base_footprint->average(),
                  (size_t)avg_young_live()->average(),
                  (size_t)avg_old_live()->average());
    }

    // And finally, our old and new sizes.
    gclog_or_tty->print(" old_promo_size: " SIZE_FORMAT
               " old_eden_size: " SIZE_FORMAT
               " desired_promo_size: " SIZE_FORMAT
               " desired_eden_size: " SIZE_FORMAT,
               _promo_size, _eden_size,
               desired_promo_size, desired_eden_size);
    gclog_or_tty->cr();
  }

  decay_supplemental_growth(is_full_gc);

  set_promo_size(desired_promo_size);
  set_eden_size(desired_eden_size);
}
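Example #30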
void PSAdaptiveSizePolicy::compute_old_gen_free_space(
                                           size_t old_live,
                                           size_t cur_eden,
                                           size_t max_old_gen_size,
                                           bool   is_full_gc) {

  // Update statistics
  // Time statistics are updated as we go, update footprint stats here
  if (is_full_gc) {
    // old_live is only accurate after a full gc
    avg_old_live()->sample(old_live);
  }

  // This code used to return if the policy was not ready, i.e.,
  // policy_is_ready() returning false.  The intent was that
  // decisions below needed major collection times and so could
  // not be made before two major collections.  A consequence was
  // adjustments to the young generation were not done until after
  // two major collections even if the minor collections times
  // exceeded the requested goals.  Now let the young generation
  // adjust for the minor collection times.  Major collection times
  // will be zero for the first collection and will naturally be
  // ignored.  Tenured generation adjustments are only made at the
  // full collections so until the second major collection has
  // been reached, no tenured generation adjustments will be made.

  // Until we know better, desired promotion size uses the last calculation
  size_t desired_promo_size = _promo_size;

  // Start eden at the current value.  The desired value that is stored
  // in _eden_size is not bounded by constraints of the heap and can
  // run away.
  //
  // As expected setting desired_eden_size to the current
  // value of desired_eden_size as a starting point
  // caused desired_eden_size to grow way too large and caused
  // an overflow downstream.  It may have improved performance in
  // some cases but is dangerous.
  size_t desired_eden_size = cur_eden;

  // Cache some values. There's a bit of work getting these, so
  // we might save a little time.
  const double major_cost = major_gc_cost();
  const double minor_cost = minor_gc_cost();

  // Limits on our growth
  size_t promo_limit = (size_t)(max_old_gen_size - avg_old_live()->average());

  // But don't force a promo size below the current promo size. Otherwise,
  // the promo size will shrink for no good reason.
  promo_limit = MAX2(promo_limit, _promo_size);

  const double gc_cost_limit = GCTimeLimit/100.0;

  // Which way should we go?
  // if pause requirement is not met
  //   adjust size of any generation with average pause exceeding
  //   the pause limit.  Adjust one pause at a time (the larger)
  //   and only make adjustments for the major pause at full collections.
  // else if throughput requirement not met
  //   adjust the size of the generation with larger gc time.  Only
  //   adjust one generation at a time.
  // else
  //   adjust down the total heap size.  Adjust down the larger of the
  //   generations.

  // Add some checks for a threshold for a change.  For example,
  // a change less than the necessary alignment is probably not worth
  // attempting.

  if ((_avg_minor_pause->padded_average() > gc_pause_goal_sec()) ||
      (_avg_major_pause->padded_average() > gc_pause_goal_sec())) {
    //
    // Check pauses
    //
    // Make changes only to affect one of the pauses (the larger)
    // at a time.
    if (is_full_gc) {
      set_decide_at_full_gc(decide_at_full_gc_true);
      adjust_promo_for_pause_time(is_full_gc, &desired_promo_size, &desired_eden_size);
    }
  } else if (adjusted_mutator_cost() < _throughput_goal) {
    // This branch used to require that (mutator_cost() > 0.0 in 1.4.2.
    // This sometimes resulted in skipping to the minimize footprint
    // code.  Change this to try and reduce GC time if mutator time is
    // negative for whatever reason.  Or for future consideration,
    // bail out of the code if mutator time is negative.
    //
    // Throughput
    //
    assert(major_cost >= 0.0, "major cost is < 0.0");
    assert(minor_cost >= 0.0, "minor cost is < 0.0");
    // Try to reduce the GC times.
    if (is_full_gc) {
      set_decide_at_full_gc(decide_at_full_gc_true);
      adjust_promo_for_throughput(is_full_gc, &desired_promo_size);
    }
  } else {

    // Be conservative about reducing the footprint.
    //   Do a minimum number of major collections first.
    //   Have reasonable averages for major and minor collections costs.
    if (UseAdaptiveSizePolicyFootprintGoal &&
        young_gen_policy_is_ready() &&
        avg_major_gc_cost()->average() >= 0.0 &&
        avg_minor_gc_cost()->average() >= 0.0) {
      if (is_full_gc) {
        set_decide_at_full_gc(decide_at_full_gc_true);
        size_t desired_sum = desired_eden_size + desired_promo_size;
        desired_promo_size = adjust_promo_for_footprint(desired_promo_size, desired_sum);
      }
    }
  }

  // Note we make the same tests as in the code block below;  the code
  // seems a little easier to read with the printing in another block.
  if (desired_promo_size > promo_limit)  {
    // "free_in_old_gen" was the original value for used for promo_limit
    size_t free_in_old_gen = (size_t)(max_old_gen_size - avg_old_live()->average());
    log_debug(gc, ergo)(
          "PSAdaptiveSizePolicy::compute_old_gen_free_space limits:"
          " desired_promo_size: " SIZE_FORMAT
          " promo_limit: " SIZE_FORMAT
          " free_in_old_gen: " SIZE_FORMAT
          " max_old_gen_size: " SIZE_FORMAT
          " avg_old_live: " SIZE_FORMAT,
          desired_promo_size, promo_limit, free_in_old_gen,
          max_old_gen_size, (size_t) avg_old_live()->average());
  }
  if (gc_cost() > gc_cost_limit) {
    log_debug(gc, ergo)(
          "PSAdaptiveSizePolicy::compute_old_gen_free_space: gc time limit"
          " gc_cost: %f "
          " GCTimeLimit: " UINTX_FORMAT,
          gc_cost(), GCTimeLimit);
  }

  // Align everything and make a final limit check
  desired_promo_size = align_size_up(desired_promo_size, _space_alignment);
  desired_promo_size = MAX2(desired_promo_size, _space_alignment);

  promo_limit = align_size_down(promo_limit, _space_alignment);

  // And one last limit check, now that we've aligned things.
  desired_promo_size = MIN2(desired_promo_size, promo_limit);

  // Timing stats
  log_debug(gc, ergo)("PSAdaptiveSizePolicy::compute_old_gen_free_space: costs minor_time: %f major_cost: %f  mutator_cost: %f throughput_goal: %f",
             minor_gc_cost(), major_gc_cost(), mutator_cost(), _throughput_goal);

  log_trace(gc, ergo)("Minor_pause: %f major_pause: %f minor_interval: %f major_interval: %f pause_goal: %f",
                      _avg_minor_pause->padded_average(),
                      _avg_major_pause->padded_average(),
                      _avg_minor_interval->average(),
                      _avg_major_interval->average(),
                      gc_pause_goal_sec());

  // Footprint stats
  log_debug(gc, ergo)("Live_space: " SIZE_FORMAT " free_space: " SIZE_FORMAT,
                      live_space(), free_space());

  log_trace(gc, ergo)("Base_footprint: " SIZE_FORMAT " avg_young_live: " SIZE_FORMAT " avg_old_live: " SIZE_FORMAT,
                      (size_t)_avg_base_footprint->average(),
                      (size_t)avg_young_live()->average(),
                      (size_t)avg_old_live()->average());

  log_debug(gc, ergo)("Old promo_size: " SIZE_FORMAT " desired_promo_size: " SIZE_FORMAT,
                      _promo_size, desired_promo_size);

  set_promo_size(desired_promo_size);
}
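
The live_space() and free_space() figures in the footprint log lines above are presumably simple accessors over the sampled averages and the current desired sizes. A hedged sketch of what they might compute:

// Hypothetical sketch of the accessors behind the footprint logging.
size_t AdaptiveSizePolicy::live_space() const {
  return (size_t)(avg_base_footprint()->average() +
                  avg_young_live()->average() +
                  avg_old_live()->average());
}

size_t AdaptiveSizePolicy::free_space() const {
  return _eden_size + _promo_size;  // current desired eden and promo sizes
}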