Example no. 1
static bool
coalesce_blocks(void *ptr1, void *ptr2)
{
	void *tmpptr = Min(ptr1, ptr2);
	Size new_size;
	void *next;

	ptr2 = Max(ptr1, ptr2);
	ptr1 = tmpptr;

	if (get_end(ptr1) != get_header(ptr2))
		return false;

	Assert(get_next(ptr1) == ptr2);
	Assert(!is_allocated(ptr1));
	Assert(!is_allocated(ptr2));

	new_size = get_size(ptr1) + BLOCK_SIZE(get_size(ptr2));
	get_header(ptr1)->size = new_size;
	/* Clear ptr2's magic so it is no longer recognized as a block header. */
	get_header(ptr2)->magic = 0;

	next = get_next(ptr2);
	set_next(ptr1, next);

	if (next)
		set_prev(next, ptr1);

	return true;
}
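The function above only makes sense against a particular block layout: each payload is preceded by a header carrying its size and a magic tag, and get_end()/get_header() let adjacency be tested by pointer comparison. A minimal sketch of that layout, assuming the field order and helper definitions (only the invariants are taken from the code):

/* Hedged sketch of the layout coalesce_blocks() assumes. */
typedef struct Header
{
	Size	size;	/* payload bytes that follow this header */
	int	magic;	/* nonzero while the header is valid; zeroed on merge */
} Header;

#define BLOCK_SIZE(size)	((size) + sizeof(Header))

/* Assumed helper definitions, consistent with the checks above:
 *   get_header(ptr) == (Header *) ((char *) ptr - sizeof(Header))
 *   get_end(ptr)    == (char *) ptr + get_size(ptr)
 * so get_end(ptr1) == get_header(ptr2) means the blocks are adjacent in
 * memory, and the merged size absorbs ptr2's header: size1 + BLOCK_SIZE(size2). */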
Example no. 2
// Anthony
int allocate(int number_of_bytes)
{
    Node *p = HeapArray;

    // find a free header that fits number_of_bytes
    while (p < HeapArray + HEAP_SIZE) {
        // if free and enough space
        if ( is_allocated(p) == 0 && block_size(p) >= number_of_bytes) {

            // if we aren't allocating the whole block,
            // it must be split and a new header must be made
            if (number_of_bytes < p->size) {
                Node *next_header = p + number_of_bytes;
                set_allocated(next_header, 0);
                set_block_size(next_header, p->size - number_of_bytes);
                set_block_number(next_header, 0);
            }
            // finally, allocate the header
            set_allocated(p, 1);
            set_block_size(p, number_of_bytes);
            set_block_number(p, nextBlockNumber);

            printf("%d\n", nextBlockNumber);
            return nextBlockNumber++;
        }
        p = p + block_size(p);
    }
    printf("Could not allocate %d bytes.\n", number_of_bytes);
    return 0;
}
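This is a first-fit allocator: the scan takes the first free header that is large enough and splits oversized blocks in place. A hedged usage sketch; deallocate() is hypothetical, assumed to be allocate()'s counterpart taking the block number it returned:

// Hypothetical driver for the allocator above.
void demo(void)
{
    int a = allocate(16);   // first fit: takes the first free header large enough
    int b = allocate(64);   // splitting a big block leaves a new free header behind it
    deallocate(a);          // hypothetical: frees block a, leaving a 16-unit hole
    int c = allocate(8);    // first fit reuses the hole left by block a
    deallocate(c);
    deallocate(b);
}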
Example no. 3
 void set_to_identity() {
   assert(is_allocated());
   assert(size1_==size2_);
   if (size1_>0) {
     block().setIdentity();
   }
 }
Example no. 4
 //delete last row and column
 inline void remove_row_col_last() {
   assert(is_allocated());
   assert(size1_>0);
   assert(size2_>0);
   --size1_;
   --size2_;
 }
Example no. 5
    // try to allocate; if cannot, return 0
    type * allocate( int nChunk) {
        lock_type locker( m_cs);

        int nConsecutive = 0;
        int idxStartOfChunk = 0;
        for ( int idx = 0; idx < max; ++idx) {
            if ( is_allocated( idx)) 
                nConsecutive = 0;
            else {
                if ( nConsecutive == 0)
                    idxStartOfChunk = idx;
                ++nConsecutive;
            }

            if ( nConsecutive >= nChunk) {
                // we can allocate
                for ( int idxAllocated = 0; idxAllocated < nChunk; ++idxAllocated) 
                    set_as_allocated( idxStartOfChunk + idxAllocated);
                return m_vals + idxStartOfChunk;
            }
        }

        // we could not allocate
        return 0;
    }
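The pool hands out runs of consecutive slots from a fixed array under a lock, but no release routine is shown. A hedged sketch of the counterpart, mirroring allocate()'s style; set_as_free() is hypothetical:

    // Hedged counterpart to allocate(); set_as_free() is hypothetical.
    void release( type * chunk, int nChunk) {
        lock_type locker( m_cs);

        int idxStart = (int)( chunk - m_vals);
        for ( int idx = 0; idx < nChunk; ++idx)
            set_as_free( idxStart + idx);
    }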
Example no. 6
/*
 * ShmemDynAlloc
 */
void *
ShmemDynAlloc(Size size)
{
	void *block = NULL;
	Size padded_size;

	size = Max(ALIGN(size), MIN_ALLOC_SIZE);
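	/*
	 * Round requests of up to 2K up to the next power of two;
	 * larger requests pass through the loop below unpadded.
	 */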
	for (padded_size = 1; padded_size < size && padded_size <= 1024; padded_size *= 2);
	size = Max(size, padded_size);

	block = get_block(size);

	if (block == NULL)
	{
		/*
		 * Don't request fewer than 1k from ShmemAlloc.
		 * The more contiguous memory we have, the better we
		 * can combat fragmentation.
		 */
		Size alloc_size = Max(size, MIN_SHMEM_ALLOC_SIZE);
		block = ShmemAlloc(BLOCK_SIZE(alloc_size));

		memset(block, 0, BLOCK_SIZE(alloc_size));
		block = (void *) ((intptr_t) block + sizeof(Header));
		init_block(block, alloc_size, true);
		mark_allocated(block);
	}

	if (get_size(block) - size >= MIN_BLOCK_SIZE)
		split_block(block, size);

	Assert(is_allocated(block));

	return block;
}
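A worked trace of the padding loop above, with values computed by hand (assuming ALIGN and MIN_ALLOC_SIZE leave these requests unchanged):

/* size = 24   -> padded_size = 32   -> allocate 32 bytes
 * size = 1000 -> padded_size = 1024 -> allocate 1024 bytes
 * size = 1500 -> padded_size = 2048 -> allocate 2048 (one doubling past the 1K cap)
 * size = 3000 -> loop stops at 2048 -> allocate 3000 (no padding above 2K)
 */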
Example no. 7
File: vm.c Project: tspink/ksim
int vm_copy_from(struct ksim_context *ctx, void __guest *addr, void *dest, unsigned int size)
{
	if (!is_allocated(ctx, addr))
		return -1;

	return ctx->arcsim->vm_write(dest, (unsigned long)addr, size);
}
Example no. 8
// Mukesh
void blocklist()
{
    Node *p = HeapArray;

    // print out the heap
    // size allocated   start   end (addresses in Hex)
    printf("%s   %s     %s            %s\n", "Size", "Allocated", "Start", "End");
    int start = 0;
    int end;
    while (p < HeapArray + HEAP_SIZE) {

        printf("%3d", p->size);

        if (is_allocated(p) == 0)
            printf("  %8s", "No");
        else
            printf("  %8s", "Yes");

        printf("      0x%08x", start);
        end = start + p->size - 1;
        printf("      0x%08x\n", end);
        start = end + 1;

        p = p + block_size(p);

    }

}
Example no. 9
 inline
 Eigen::Block<const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> >
 block(int start_row, int start_col, int rows, int cols) const {
   assert(is_allocated());
   assert(start_row+rows<=size1());
   assert(start_col+cols<=size2());
   return values_.block(start_row, start_col, rows, cols);
 }
Example no. 10
void gauge_conf_t::read(const char *path)
{
  if(!is_allocated()) create();
  read_ildg_gauge_conf(U,path);
  reset_theta();
  
  master_printf("plaq: %.18g\n",global_plaquette_lx_conf(U));  
}
Example no. 11
    inline ResizableMatrix operator-(const ResizableMatrix<Scalar> &M2) const {
      assert(is_allocated());
      assert(size1_==M2.size1_);
      assert(size2_==M2.size2_);

      ResizableMatrix Mdiff(*this);
      Mdiff.values_.block(0,0,size1_,size2_) -= M2.values_.block(0,0,size1_,size2_);
      return Mdiff;
    }
Example no. 12
 /**
  * Free memory
  */
 void free(void* pos) {
     uint32_t* block = reinterpret_cast<uint32_t*>(pos);
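     // step back one 32-bit word to land on the block's size/status header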
     block--;
     if (!is_allocated(next_block(block))) {
         uint32_t new_size = size_of_block(block) + 4 + size_of_block(next_block(block));
         record(block, new_size, kFree);
     } else {
         record(block, size_of_block(block), kFree);
     }
 }
Example no. 13
void CodeSection::initialize_locs_from(const CodeSection* source_cs) {
  int lcount = source_cs->locs_count();
  if (lcount != 0) {
    initialize_shared_locs(source_cs->locs_start(), lcount);
    _locs_end = _locs_limit = _locs_start + lcount;
    assert(is_allocated(), "must have copied code already");
    set_locs_point(start() + source_cs->locs_point_off());
  }
  assert(this->locs_count() == source_cs->locs_count(), "sanity");
}
Example no. 14
 //Destructive version of resize()
 inline void destructive_resize(int size1, int size2) {
   if (!is_allocated()) {
     values_.resize(size1, size2);
   } else {
     if (size1>memory_size1() || size2>memory_size2()) {
       values_.resize(1.2*size1+1, 1.2*size2+1);
     }
   }
   size1_ = size1;
   size2_ = size2;
 }
Example no. 15
 //Resize while leaving the old values untouched.
 //The new elements are not initialized.
 //If the new size exceeds the allocated memory, memory is reallocated;
 //in that case we allocate slightly more than requested to avoid repeated reallocations.
 inline void conservative_resize(int size1, int size2) {
   if (!is_allocated()) {
     values_.resize(size1, size2);
   } else {
     //Should we consider cache line length?
     if (size1>memory_size1() || size2>memory_size2()) {
       values_.conservativeResize(1.2*size1+1, 1.2*size2+1);
     }
   }
   size1_ = size1;
   size2_ = size2;
 }
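The 1.2x factor over-allocates slightly so that a run of growing resizes triggers only occasional reallocations. A worked example of the rule, sizes computed by hand:

// memory is 10x10 and conservative_resize(11, 11) is requested:
//   11 > 10, so Eigen reallocates to (1.2*11+1, 1.2*11+1) = 14x14 (truncated to int)
//   size1_/size2_ become 11; later growth up to 14x14 reuses the same buffer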
Example no. 16
    //This is well-defined for a square matrix
    inline Scalar determinant() const {
      assert(is_allocated());
      assert(size1_==size2_);

      const int size = size1_;

      //the simple ones...
      if(size==0) return 1;
      if(size==1) return operator()(0,0);
      if(size==2) return operator()(0,0)*operator()(1,1)-operator()(0,1)*operator()(1,0);

      return block().determinant();
    }
Example no. 17
File: gc.c Project: Han40/spamOSEK
void mark (Object *obj)
{
    if (obj == JNULL)
        return;

#ifdef VERIFY_GC
    assert (is_allocated (obj), GC0);
#endif

    if (is_gc_marked (obj))
        return;
    set_gc_marked (obj);
    if (is_array (obj))
    {
        if (get_element_type (obj) == T_REFERENCE)
        {
            unsigned short i;
            unsigned short length = get_array_length (obj);
            REFERENCE *refarr = ref_array (obj);

            for (i = 0; i < length; i++)
                mark (refarr[i]);
        }
    }
    else
    {
        ClassRecord *classRecord;
        byte classIndex;

        classIndex = get_na_class_index (obj);
        for (;;)
        {
            classRecord = get_class_record (classIndex);
            // Mark fields of type REFERENCE.
            mark_reference_fields (obj, classRecord);
            if (classIndex == JAVA_LANG_OBJECT)
                break;
            classIndex = classRecord -> parentClass;
        }
    }
}
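mark() is the marking half of a mark-sweep collector: it recurses through reference arrays and walks the reference fields of each class up the inheritance chain. The sweep half is not shown; a heavily hedged sketch of it, where every helper except is_allocated() and is_gc_marked() is hypothetical:

void sweep (void)
{
    Object *obj;

    /* first_object(), next_object(), heap_end(), clear_gc_marked() and
     * release_object() are hypothetical names for this sketch. */
    for (obj = first_object (); obj != heap_end (); obj = next_object (obj))
    {
        if (!is_allocated (obj))
            continue;
        if (is_gc_marked (obj))
            clear_gc_marked (obj);  /* survivor: clear the bit for the next cycle */
        else
            release_object (obj);   /* unreachable: reclaim its storage */
    }
}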
Example no. 18
File: vm.c Project: tspink/ksim
int vm_alloc_fixed(struct ksim_context *ctx, void __guest *addr, unsigned int size)
{
	struct ksim_vm_info *vmi = thread_current(ctx)->vm;
	struct vm_alloc_region *rgn;
	
	/* Must be aligned to 4-byte boundary. */
	/* TODO: Should this be 8-bytes for 64-bit emulation? */
	if ((unsigned long)addr % 4)
		return -1;
	
	/* Size must be aligned to page size. */
	if (size % GUEST_PAGE_SIZE)
		size += GUEST_PAGE_SIZE - (size % GUEST_PAGE_SIZE);
	
	/* Look for overlapping allocation regions, and instantly refuse
	 * allocation. */
	
	/* TODO: MMAP semantics allow overlapping regions, where
	 * overlapping portions are split. */
	if (is_allocated(ctx, addr)) {
		kdbg("vm: address already allocated\n");
		return -1;
	}
	
	/* Allocate storage for the allocation region descriptor. */
	rgn = malloc(sizeof(*rgn));
	if (!rgn) {
		return -1;
	}
	
	/* Populate the region descriptor, and insert it into the list. */
	rgn->base = (unsigned long)addr;
	rgn->size = size;
	rgn->next = vmi->regions;
	
	vmi->regions = rgn;
	
	kdbg("vm: alloc: base=0x%lx, size=0x%x\n", rgn->base, rgn->size);
	
	return 0;
}
Example no. 19
    /**
     * Allocate memory
     */
    void* alloc(uint32_t size) {
        // round odd sizes up to keep every block 2-byte aligned
        if (size & 0b1) size += 1;

        void* cur = mstart_;
        for (; cur != mend_; cur = next_block(cur)) {
            uint32_t block_size = size_of_block(cur);
            if (block_size >= size && !is_allocated(cur)) {
                void* remain = slice_block(cur, size);
                uint32_t remain_size = block_size - 4 - size;  // 4 bytes is for header
                if (remain_size > 0) {
                    record(cur, size, kAllocated);
                    record(remain, remain_size, kFree);
                } else {
                    record(cur, block_size, kAllocated);
                }
                return reinterpret_cast<uint8_t*>(cur) + 4;
            }
        }
        return nullptr;
    }
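alloc() and the free() in example no. 12 share one convention: a 4-byte size/status header sits in front of every payload, with first-fit splitting on allocation and forward coalescing on free. A hedged round trip; the Allocator wrapper name is hypothetical:

// Hypothetical driver; `heap` is assumed to own mstart_/mend_ and the
// alloc()/free() pair shown here and in example no. 12.
void demo(Allocator& heap) {
    void* a = heap.alloc(9);    // odd request, padded to 10 for 2-byte alignment
    void* b = heap.alloc(32);   // first fit; splits a larger free block when possible
    heap.free(a);               // the header sits 4 bytes behind the returned pointer
    heap.free(b);               // free() merges b with the free block that follows it
    void* c = heap.alloc(40);   // may be satisfied from the coalesced region
    heap.free(c);
}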
Example no. 20
bool DiskManager::read_page(Page * page)
{
  if( !update_context(page->get_fname()))
    return false;
    
  if( !is_allocated(page->get_pid()) )
    return true;
   
  if(fseek(file_ , page->get_pid()*Page::PAGE_SIZE , SEEK_SET)){
    Utils::log("[DiskManager] can't change offset in file",ERROR);
    return false;
  }
#ifdef IO_DISK_M
  Utils::log("[DiskManager] read in file page: "+ std::to_string(page->get_pid()) );
#endif
  if( fread(page->get_data(),sizeof(char),Page::PAGE_SIZE,file_) != Page::PAGE_SIZE ){
    Utils::log("[DiskManager] can't read from file",ERROR);
    return false;
  }
  return true;
}
Example no. 21
 inline void clear() {
   if (is_allocated()) {
     block().setZero();
   }
 }
Example no. 22
 inline int memory_size2() const {assert(is_allocated()); return values_.cols();}
Example no. 23
 //swap two rows
 inline void swap_row(int r1, int r2) {
   assert(is_allocated());
   assert(r1<size1_);
   assert(r2<size1_);
   values_.row(r1).swap(values_.row(r2));
 }
Example no. 24
 inline const Scalar& operator()(const int i, const int j) const {
   assert(is_allocated());
   assert(i<size1_);
   assert(j<size2_);
   return values_(i,j);
 }
Example no. 25
 inline int size2() const {assert(is_allocated()); return size2_;}
Example no. 26
 inline
 const Eigen::Block<const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> >
 block() const {
   assert(is_allocated());
   return values_.block(0, 0, size1_, size2_);
 }
Example no. 27
 //swap two columns
 inline void swap_col(int c1, int c2) {
   assert(is_allocated());
   assert(c1<size2_);
   assert(c2<size2_);
   values_.col(c1).swap(values_.col(c2));
 }
Example no. 28
 inline Scalar trace() {
   assert(is_allocated());
   assert(size1_ == size2_);
   return block().trace();
 }
Example no. 29
 inline void invert() {
   assert(is_allocated());
   assert(size1_==size2_);
   eigen_matrix_t inv = block().inverse();
   values_ = inv; //Should we use std::swap(values_, inv)?
 }
Example no. 30
 inline Scalar max() const {
   assert(is_allocated());
   return block().maxCoeff();
 }
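Examples no. 3, 4, 9, 11, 14-16 and 21-30 are all methods of the same Eigen-backed ResizableMatrix<Scalar>, which keeps a logical size1_ x size2_ window inside a possibly larger values_ buffer. A hedged usage sketch; the (rows, cols) constructor is an assumption:

// Hedged usage sketch of the methods shown above.
void demo() {
    ResizableMatrix<double> M(3, 3);   // assumed size constructor
    M.set_to_identity();               // example no. 3: square, allocated matrix required
    M.swap_row(0, 2);                  // example no. 23: one row swap flips the determinant's sign
    double d = M.determinant();        // example no. 16: d == -1 for the swapped identity
    M.conservative_resize(4, 4);       // example no. 15: the old 3x3 values are preserved
    M.remove_row_col_last();           // example no. 4: logical shrink back to 3x3
}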