Beispiel #1
0
    /*
     * Debug-only consistency walk over the whole to-space.
     * Pass 1 re-verifies every already-scanned object (up to next_scan);
     * pass 2 verifies allocated-but-unscanned objects and touches each of
     * their child pointers so a corrupt one faults here, not later.
     * No-op unless debug_heap is set.
     */
    static void verify_heap()
    {
	if (!debug_heap)
	    return;
	void *p = tospace;
	/* Pass 1: objects below next_scan have already been scanned. */
	while (p < next_scan) {
	    obj_t *obj = (obj_t *)p;
	    mem_ops_t *ops = OBJ_MEM_OPS(obj);
	    assert(is_known_ops(ops));
	    size_t size = aligned_size(ops->mo_size(obj));
	    verify_object(obj, true);
	    p += size;
	}
	if (p != next_scan) {
	    printf("verify_heap: to_space=%p\n", tospace);
	    printf("            next_scan=%p\n", next_scan);
	    printf("                    p=%p\n", p);
	    printf("           next_alloc=%p\n", next_alloc);
	    printf("         to_space_end=%p\n", tospace_end);
	    /* Fixed: was "%0", an invalid conversion spec (undefined
	     * behavior); print the pointer like the lines above. */
	    printf("            alloc_end=%p\n", alloc_end);
	}
	assert(p == next_scan);
	/* Pass 2: allocated but not yet scanned. */
	while (p < next_alloc) {
	    obj_t *obj = (obj_t *)p;
	    mem_ops_t *ops = OBJ_MEM_OPS(obj);
	    assert(is_known_ops(ops));
	    verify_object(obj, false);
	    size_t size = aligned_size(ops->mo_size(obj));
	    size_t i, nptr = ops->mo_ptr_count(obj);
	    /* Dereference every child pointer as a crash-early check. */
	    for (i = 0; i < nptr; i++)
		ops->mo_get_ptr(obj, i);
	    p += size;
	}
    }
  /// Append one prefix+kernel record, constructing it in place from `args`.
  void emplace_back_sep(ArgTypes &&... args) {
    // The kernel may not demand alignment stricter than our 8-byte slots.
    static_assert(alignof(KernelType) <= 8, "kernel types require alignment to be at most 8 bytes");

    const size_t record_start = m_size;
    const size_t record_bytes =
        aligned_size(sizeof(PrefixType)) + aligned_size(sizeof(KernelType));
    m_size += record_bytes;
    reserve(m_size);

    // Construct the prefix (and, through it, the kernel) at the record start.
    PrefixType::template init<KernelType>(this->get_at<PrefixType>(record_start), std::forward<ArgTypes>(args)...);
  }
// Upload the pixels of `bmp` into this texture (GL_TEXTURE_RECTANGLE_ARB),
// creating the GL texture object on first use.  Empty bitmaps are ignored.
void Texture::set(const BMPb& bmp){
  unsigned char* data=NULL;
  sizeX=bmp.w;
  sizeY=bmp.h;
  // Nothing to upload for an empty image.
  if(sizeX==0||sizeY==0)
    return;
  // aligned_base(sizeX)==0 appears to mean the row needs no padding — confirm
  // against aligned_base's definition.
  int align=aligned_base(sizeX);

  if(align==0&&(sizeof(bmp.rgb[0])==3)){
    // Fast path: rows already aligned and pixels are packed 3-byte values
    // (presumably RGB triples — confirm), so bmp.rgb can go to GL directly.
  }else{
    // Repack into a malloc'd staging buffer laid out by aligned_pos.
    data=(unsigned char*)malloc(aligned_size(sizeX,sizeY));
    bmp_for3(bmp)
      data[aligned_pos(sizeX,x,y,z)]=bmp(x,y,z);
  }
  // Lazily create the GL texture name on first upload.
  if(!texturep){
    glGenTextures( 1, &texture );
    texturep=1;
  }
  glBindTexture( GL_TEXTURE_RECTANGLE_ARB, texture );
  glTexParameterf( GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
  glTexParameterf( GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
       
  if(align==0&&(sizeof(bmp.rgb[0])==3)){
    glTexImage2D( GL_TEXTURE_RECTANGLE_ARB, 0, GL_RGBA, sizeX, sizeY, 0, GL_RGB, GL_UNSIGNED_BYTE, bmp.rgb );
  }else{
    // Upload the staging copy, then release it.
    glTexImage2D( GL_TEXTURE_RECTANGLE_ARB, 0, GL_RGBA, sizeX, sizeY, 0, GL_RGB, GL_UNSIGNED_BYTE, data );
    free(data);
  }
}
Beispiel #4
0
/*
 * Compute the thread pointer ($tp) for a thread whose TLS/TDB block starts
 * at combined_area (sized by __nacl_tls_combined_size).
 */
static char *tp_from_combined_area(const struct tls_info *info,
                                   void *combined_area, size_t tdb_size) {
  const ptrdiff_t tdb_offset = __nacl_tp_tdb_offset(tdb_size);
  size_t tls_size = info->tdata_size + info->tbss_size;

  if (tdb_offset >= 0) {
    /*
     * The linker increases the size of the TLS block up to its alignment
     * requirement, and that total is subtracted from the $tp address to
     * access the TLS area.  To keep that final address properly aligned,
     * we need to align up from the allocated space and then add the
     * aligned size.
     */
    tls_size = aligned_size(tls_size, info->tls_alignment);
    return aligned_addr((char *) combined_area, info->tls_alignment) + tls_size;
  }

  /*
   * The combined area is big enough to hold the TDB and then be aligned
   * up to the $tp alignment requirement.  If the whole area is aligned
   * to the $tp requirement, then aligning the beginning of the area
   * would give us the beginning unchanged, which is not what we need.
   * Instead, align from the putative end of the TDB, to decide where
   * $tp--the true end of the TDB--should actually lie.
   */
  return aligned_addr((char *) combined_area + tdb_size, info->tls_alignment);
}
Beispiel #5
0
 /// Make the buffer fit `len` elements: reallocate when the required size
 /// exceeds the current capacity, or when less than a quarter of the
 /// current capacity would be used (shrink to avoid waste).
 void set_size(int len){
   const int needed=aligned_size(len*sizeof(T)+A_1);
   if(needed>this->size || 4*needed<this->size)
     this->alloc(len);
 }
Beispiel #6
0
 // (Re)allocate backing storage for `len` elements of T, leaving room to
 // round the data pointer up to an aligned address inside the buffer.
 // NOTE(review): A_1 looks like alignment-minus-one (so A_1+1 is a power
 // of two) — confirm against its definition.
 void alloc(int len){
   // Over-allocate by A_1 bytes so the aligned data pointer still has
   // room for all `len` elements.
   int buffsz=aligned_size(len*sizeof(T)+A_1);
   this->free();
   this->size=buffsz;
   this->buff=new byte[this->size];
   // `+` binds tighter than `&`, so this is (buff + A_1) & ~A_1: round
   // buff up to the next (A_1+1)-byte boundary.
   this->data=reinterpret_cast<T*>(
     reinterpret_cast<stdm::intptr_t>(this->buff)+A_1 & ~A_1
     );
 }
Beispiel #7
0
/*
 * Lay out and initialize one thread's TLS area inside combined_area
 * (whose size must have come from __nacl_tls_combined_size(tdb_size))
 * and return the thread pointer ($tp) for it.
 */
void *__nacl_tls_initialize_memory(void *combined_area, size_t tdb_size) {
  const struct tls_info *info = get_tls_info();
  size_t tls_size = info->tdata_size + info->tbss_size;
  char *combined_area_end =
      (char *) combined_area + __nacl_tls_combined_size(tdb_size);
  void *tp = tp_from_combined_area(info, combined_area, tdb_size);
  char *start = tp;

  /* The sign of __nacl_tp_tls_offset distinguishes the ARM-style layout
   * (TLS above $tp) from the x86-style layout (TLS below $tp). */
  if (__nacl_tp_tls_offset(0) > 0) {
    /*
     * From $tp, we skip the header size and then must round up from
     * there to the required alignment (which is what the linker will
     * will do when calculating TPOFF relocations at link time).  The
     * end result is that the offset from $tp matches the one chosen
     * by the linker exactly and that the final address is aligned to
     * info->tls_alignment (since $tp was already aligned to at least
     * that much).
     */
    start += aligned_size(__nacl_tp_tls_offset(tls_size), info->tls_alignment);
  } else {
    /*
     * We'll subtract the aligned size of the TLS block from $tp, which
     * must itself already be adequately aligned.
     */
    start += __nacl_tp_tls_offset(aligned_size(tls_size, info->tls_alignment));
  }

  /* Sanity check.  (But avoid pulling in assert() here.) */
  if (start + info->tdata_size + info->tbss_size > combined_area_end)
    simple_abort();
  /* .tdata is copied from the template image; .tbss is zero-filled. */
  memcpy(start, info->tdata_start, info->tdata_size);
  memset(start + info->tdata_size, 0, info->tbss_size);

  if (__nacl_tp_tdb_offset(tdb_size) == 0) {
    /*
     * On x86 (but not on ARM), the TDB sits directly at $tp and the
     * first word there must hold the $tp pointer itself.
     */
    void *tdb = (char *) tp + __nacl_tp_tdb_offset(tdb_size);
    *(void **) tdb = tdb;
  }

  return tp;
}
Beispiel #8
0
/*
 * Scan one to-space object: forward every pointer field through move_obj
 * (evacuating referents as needed) and return the address just past the
 * object, i.e. the next object to scan.
 */
static void *scan_obj(obj_t *obj)
{
    mem_ops_t *ops = OBJ_MEM_OPS(obj);
    assert(is_known_ops(ops));
    size_t size = aligned_size(ops->mo_size(obj));
    size_t i, n_ptrs = ops->mo_ptr_count(obj);
    for (i = 0; i < n_ptrs; i++) {
	ops->mo_set_ptr(obj, i, move_obj(ops->mo_get_ptr(obj, i)));
    }
    /* Fixed: arithmetic on void * is a GCC extension, not ISO C; do the
     * byte arithmetic on a char * instead.  Same resulting address. */
    return (char *)obj + size;
}
Beispiel #9
0
/*
 * Evacuate `obj` into to-space (Cheney-style copying GC) and return its
 * new address.  NULL and already-evacuated objects are returned as-is;
 * an object that was moved earlier is resolved via its forwarding pointer.
 */
static obj_t *move_obj(obj_t *obj)
{
    /* Nothing to do for NULL or objects already living in to-space. */
    if (is_null(obj) || is_in_tospace(obj))
	return obj;
    /* Already evacuated: follow the forwarding pointer left behind. */
    if (OBJ_IS_FWD(obj))
	return OBJ_FWD_PTR(obj);
    assert(is_known_ops(OBJ_MEM_OPS(obj)));
    /* Claim space at the to-space allocation frontier. */
    size_t size = aligned_size(OBJ_MEM_OPS(obj)->mo_size(obj));
    assert(next_alloc + size <= alloc_end);
    obj_t *new_obj = next_alloc;
    next_alloc += size;
    assert(next_alloc <= alloc_end);
    /* Move the payload first, then stamp the old copy with a forwarding
     * pointer — this order matters: mo_move must read the intact object. */
    OBJ_MEM_OPS(obj)->mo_move(obj, new_obj);
    OBJ_SET_FWD(obj, new_obj);
    return new_obj;
}
Beispiel #10
0
/*
 * Allocate a heap object of size_bytes (rounded up to the heap alignment)
 * and stamp its first word with `ops`.  Runs a copying collection when the
 * current semispace cannot satisfy the request; aborts via assert if even
 * the post-collection heap is too small.
 */
obj_t *mem_alloc_obj(const mem_ops_t *ops, size_t size_bytes)
{
    assert(heap_is_initialized);
    verify_heap();
    remember_ops(ops);
    size_t alloc_size = aligned_size(size_bytes);
    /* Out of room in this semispace?  Collect, then re-check. */
    if (next_alloc > alloc_end - alloc_size) {
	copy_heap();
	assert(next_alloc <= tospace_end - alloc_size && "out of memory");
    }
    const mem_ops_t **p;
    /* with lock */ {
        p = next_alloc;
        next_alloc += alloc_size;
    }
    /* The first word of every object is its mem_ops vtable pointer. */
    *p = ops;
    return (obj_t *)p;
}
Beispiel #11
0
/// Resize the buffer to hold `new_size` bytes, rounded up to the aligned
/// size for this buffer's Properties.  Existing contents are preserved up
/// to min(old size, new size); resizing to 0 frees the storage.
/// @throws std::bad_alloc when the aligned allocation fails.
void AlignedBuffer<Properties>::resize(const size_t new_size) {
  // Fixed: this previously computed aligned_size(sz), never using the
  // `new_size` parameter at all.
  size_t new_aligned_size = aligned_size(new_size);
  if (size() == new_aligned_size) {
    return;
  }
  void* new_data = nullptr;
  if (0 != new_aligned_size) {
    int alloc_result =
        posix_memalign(&new_data, Properties::kAlignment, new_aligned_size);
    if (0 != alloc_result) {
      throw std::bad_alloc();
    }
    // Copy the smaller of the old and new sizes; skip the memcpy entirely
    // when there is nothing to copy (data_ may be null then).
    size_t copy_size = new_aligned_size > size() ? size() : new_aligned_size;
    if (0 != copy_size) {
      memcpy(new_data, data_, copy_size);
    }
  }
  if (nullptr != data_) {
    free(data_);
  }
  data_ = new_data;
  size_ = new_aligned_size;
} // end of AlignedBuffer<Properties>::resize
Beispiel #12
0
/*
 * Return the number of bytes to allocate for a thread's combined
 * TLS-plus-TDB block, including worst-case alignment padding.
 */
size_t __nacl_tls_combined_size(size_t tdb_size) {
  const struct tls_info *info = get_tls_info();
  const size_t tls_size = info->tdata_size + info->tbss_size;
  const ptrdiff_t tls_offset = __nacl_tp_tls_offset(tls_size);
  size_t total = tls_size + tdb_size;

  /*
   * __nacl_tls_initialize_memory() accepts a non-aligned pointer and
   * aligns the thread pointer itself, so reserve enough extra space for
   * that alignment padding to happen.
   */
  total += info->tls_alignment - 1;

  if (tls_offset > 0) {
    /*
     * ARM case: ARM's 8-byte header is not part of tls_size, and the
     * header itself is padded out to tls_alignment — account for it.
     */
    total += aligned_size(tls_offset, info->tls_alignment);
  }
  return total;
}
// Copy assignment: duplicates `tex`'s image into this object's own GL
// texture.  Fixed: glGetTexImage previously queried GL_TEXTURE_2D even
// though the source was bound to GL_TEXTURE_RECTANGLE_ARB, so the readback
// never saw the source texture's pixels.  Also guards self-assignment.
Texture&
Texture::operator = (const Texture& tex){
  if(this==&tex)
    return *this;   // nothing to copy from ourselves
  sizeX=tex.sizeX;
  sizeY=tex.sizeY;

  // Lazily create our own GL texture name.
  if(!texturep){
    texturep=1;
    glGenTextures( 1, &texture );
  }
  // Read the source texture back into a staging buffer.
  glBindTexture( GL_TEXTURE_RECTANGLE_ARB, tex.texture );
  unsigned char* data=(unsigned char*)malloc(aligned_size(sizeX,sizeY));
  glGetTexImage(GL_TEXTURE_RECTANGLE_ARB,0,GL_RGB,GL_UNSIGNED_BYTE,data);
    
  // Upload the staged pixels into our texture.
  glBindTexture( GL_TEXTURE_RECTANGLE_ARB, texture );
  glTexParameterf( GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
  glTexParameterf( GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
  glTexImage2D( GL_TEXTURE_RECTANGLE_ARB, 0, GL_RGBA, sizeX, sizeY, 0, GL_RGB, GL_UNSIGNED_BYTE, data );

  free(data);
  
  return *this;
}
Beispiel #14
0
/*
 * Round `start` up per `alignment` (the rounding itself is done by
 * aligned_size on the integer value) and return the result as char *.
 * Fixed: the result was cast to void * and relied on C's implicit
 * void*-to-char* conversion; cast to char * to match the declared return
 * type (also valid C++).
 */
static char *aligned_addr(void *start, size_t alignment) {
  return (char *) aligned_size((size_t) start, alignment);
}
 /// Grow the container by the aligned footprint of a `size`-byte entry,
 /// reserving backing storage for the new total.
 void emplace_back(size_t size) {
   const size_t padded = aligned_size(size);
   m_size += padded;
   reserve(m_size);
 }