Example #1
0
int moreGlass(struct Plex *plex)
{
	struct GlassBlock *gb;
	struct Glass *glasses, *first, *last, *g;

	if (plex -> glassblock == 0) return (0);
	glasses = (struct Glass *) allocate_objects (GLASS, plex -> glassblock);
	if (glasses == NULL) {
		set_error1 ("moreGlass: not enough memory for glasses");
		return (0);
	}
	gb = (struct GlassBlock *) allocate_object (GLASS_BLOCK);
	if (gb == NULL) {
		set_error1 ("moreGlass: not enough memory for glass block");
		return (0);
	}
	gb -> glasses = glasses;
	/* append the new block to the plex's chain of glass blocks */
	if (plex -> tail_glassblock == NULL)
		plex -> head_glassblock = gb;
	else plex -> tail_glassblock -> next = gb;
	plex -> tail_glassblock = gb;
	/* thread the new glasses into a singly linked free list */
	first = glasses;
	last = first + plex -> glassblock - 1;
	for (g = first; g < last; g++)
		g -> next = g + 1;
	last -> next = NULL;
	plex -> glasses = glasses;
	plex -> freeglass = glasses;
	return (1);
}
Example #2
0
std::shared_ptr<IObject> 
ObjectManager::create_object(int x, int y, float x_vel, float y_vel, 
                             ObjIndex object, Owner owner)
{
    // TODO: Redo to use a Factory pattern.
    // TODO: Read all parameters from file.
    
    std::shared_ptr<IObject> new_obj;

    // Create formation
    if (object >= ObjIndex::enemystd_v_formation)
        create_formation(x, y, x_vel, y_vel, object);

    // Create single entity
    else {
        //Object* new_obj = allocate_object(object, owner);
        new_obj = allocate_object(object, owner);

        new_obj->set_x((float)x - (float)new_obj->width() / 2.0f);
        new_obj->set_y((float)y - (float)new_obj->height() / 2.0f + (float)world_y_pos_);
        new_obj->set_x_vel(x_vel);
        new_obj->set_y_vel(y_vel);

        queue.push_back(new_obj);
    }
    return new_obj;
}
Example #3
0
struct chunk *allocate_chunk ()
{
	struct chunk *chk;
	
	chk = (struct chunk *) allocate_object (CHUNK);
	return (chk);
}
Example #4
0
  Object* ObjectMemory::new_object_typed(Class* cls, size_t bytes, object_type type) {
    Object* obj;

    obj = allocate_object(bytes);
    set_class(obj, cls);

    obj->obj_type = type;
    obj->RequiresCleanup = type_info[type]->instances_need_cleanup;

    return obj;
  }
Example #5
0
struct subpolygon *allocate_subpolygon ()
{
	struct subpolygon *spg;

	/* allocate memory */
	spg = (struct subpolygon *) allocate_object (SUBPOLYGON);
	if (error ()) return (NULL);
	if (spg == NULL) {
		set_error1 ("allocate_subpolygon: ran out of memory");
		return(NULL);
	}
	return (spg);
}
Example #6
0
struct subedge *allocate_subedge ()
{
	struct subedge *sed;

	/* allocate memory */
	sed = (struct subedge *) allocate_object (SUBEDGE);
	if (error ()) return (NULL);
	if (sed == NULL) {
		set_error1 ("allocate_subedge: ran out of memory");
		return(NULL);
	}
	return (sed);
}
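Example #7
0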
  Object* ObjectMemory::new_object_typed_dirty(STATE, Class* cls, size_t bytes, object_type type) {
    utilities::thread::SpinLock::LockGuard guard(allocation_lock_);

    Object* obj;

    obj = allocate_object(bytes);
    if(unlikely(!obj)) return NULL;

    obj->set_obj_type(type);
    obj->klass(this, cls);
    obj->ivars(this, cNil);

    return obj;
  }
Example #8
0
struct leaf *allocate_leaf ()
{
	struct leaf *lef;
	struct cept *ex;

	lef = (struct leaf *) allocate_object (LEAF);
	if (lef == NULL) {
		ex = new_cept (MEMORY_ERROR,  ALLOCATION,  FATAL_SEVERITY);
		add_function (ex, "allocate_leaf");
		add_source (ex, "msrender.c");
		return(NULL);
	}
	return (lef);
}
Example #9
0
  Object* ObjectMemory::new_object_typed(Class* cls, size_t bytes, object_type type) {
    Object* obj;

#ifdef RBX_GC_STATS
    stats::GCStats::get()->young_object_types[type]++;
#endif

    obj = allocate_object(bytes);
    obj->klass(this, cls);

    obj->set_obj_type(type);
    obj->set_requires_cleanup(type_info[type]->instances_need_cleanup);

    return obj;
  }
Example #10
0
struct surface *new_surface ()
{
	struct surface *srf_ptr;
	struct cept *ex;
	
	srf_ptr = (struct surface *) allocate_object (SURFACE);
	if (srf_ptr == NULL) {
		ex = new_cept (MEMORY_ERROR,  ALLOCATION,  FATAL_SEVERITY);
		add_object (ex,  SURFACE, "srf_ptr");
		add_function (ex, "new_surface");
		return (NULL);
	}
	srf_ptr -> surface_thickness = DEFAULT_THICKNESS;
	return (srf_ptr);
}
Example #11
0
  Object* ObjectMemory::new_object_typed(Class* cls, size_t bytes, object_type type) {
    Object* obj;

#ifdef RBX_GC_STATS
    stats::GCStats::get()->young_object_types[type]++;
#endif

    obj = allocate_object(bytes);
    if(unlikely(!obj)) return NULL;

    obj->klass(this, cls);

    obj->set_obj_type(type);

    return obj;
  }
Example #12
0
struct msscene *new_msscene ()
{
	int j, k;
	struct msscene *ms;

	ms = (struct msscene *) allocate_object (MSSCENE);
	if (ms == NULL) return (ms);
	/* default all diagnostic output to stderr */
	ms -> fperror = stderr;
	ms -> fpinform = stderr;
	ms -> fpdebug = stderr;
	/* initialize the rotation matrix to the identity */
	for (j = 0; j < 3; j++)
		for (k = 0; k < 3; k++)
			ms -> rotation[j][k] = ((j == k) ? 1.0 : 0.0);
	ms -> overlap_hue = 1;
	return (ms);
}
Example #13
0
void *amqp_allocate(amqp_memory_pool_t *pool)
{
    void *result;

    assert(pool != NULL);
    assert(pool->initialized);
    assert(pool->initializer_callback != NULL);

#ifdef DISABLE_MEMORY_POOL
    result = amqp_malloc(pool->object_size TRACE_ARGS);
#else
    assert(pool->object_size_in_fragments != 0);
    result = allocate_object(pool);
#endif

    (*pool->initializer_callback)(pool, result);

    pool->stats.outstanding_allocations++;
    pool->stats.total_allocation_calls++;

    return result;
}
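Example #14
0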
int nugpgpu_ringbuffer_render_init(struct nugpgpu_private *gpu_priv)
{
  int ret;
  u32 head;

  printk(LOG_INFO "nugpgpu_ringbuffer_render_init\n" LOG_END);
  TRACE_IN

  RING->mmio_base = RENDER_RING_BASE;
  RING->size = PAGE_SIZE * RING_PAGES;

  /* Allocate the status page. */
  ret = allocate_object(gpu_priv, &RING->status_obj, 1);
  if (ret){
    printk(LOG_ERR "Failed to allocate the status page\n" LOG_END);
    return 1;
  }

  RING->gva_status = nugpgpu_gtt_insert(gpu_priv, RING->status_obj.pg_list, 
                                        NUGPGPU_CACHE_LLC);
  if (RING->gva_status == (unsigned int)-1){
    printk(LOG_ERR "Failed to insert the status page in gtt\n" LOG_END);
    return 1;
  }

  printk(LOG_INFO "RING->gva_status : 0x%x\n" LOG_END, (unsigned int) RING->gva_status);

  RING->page_status = kmap(sg_page(RING->status_obj.pg_list->sgl));
  if (RING->page_status == NULL) {
    printk(LOG_ERR "Failed to map page_status\n" LOG_END);
    return 1;
  }
  memset(RING->page_status, 0, PAGE_SIZE);
  printk(LOG_INFO "RING->page_status : 0x%lx\n" LOG_END, (unsigned long) RING->page_status);

  /* Allocate the ringbuffer object */
  ret = allocate_object(gpu_priv, &RING->ringbuf_obj, RING_PAGES);
  if (ret){
    printk(LOG_ERR "Failed to allocate the status page\n" LOG_END);
    return 1;
  }

  RING->gva_ringbuffer = nugpgpu_gtt_insert(gpu_priv, RING->ringbuf_obj.pg_list, 
                                            NUGPGPU_CACHE_LLC);
  if (RING->gva_ringbuffer == (unsigned int)-1){
    printk(LOG_ERR "Failed to insert the status page in gtt\n" LOG_END);
    return 1;
  }

  printk(LOG_INFO "RING->gva_ringbuffer : 0x%x\n" LOG_END, (unsigned int) RING->gva_ringbuffer);

  RING->page_ringbuffer = kmap(sg_page(RING->ringbuf_obj.pg_list->sgl));
  if (RING->page_ringbuffer == NULL) {
    printk(LOG_ERR "Failed to map page_ringbuffer\n" LOG_END);
    return 1;
  }

  RING->virtual_start = ioremap_wc(gpu_priv->gtt.mappable_base + PAGE_SIZE, RING->size);
  if (RING->virtual_start == NULL) {
    printk(LOG_ERR "Problem while mapping virtual start ioremap_wc\n" LOG_END);
    return 1;
  }

  printk(LOG_INFO "Allocated the ringbuffer\n" LOG_END);

  /* Initialize the ring now.*/

  gpu_forcewake_get(gpu_priv);

  /* Write status page register */
  printk(LOG_INFO "writing status page register\n" LOG_END);

  NUGPGPU_WRITE(RENDER_HWS_PGA_GEN7, RING->gva_status);
  NUGPGPU_READ(RENDER_HWS_PGA_GEN7);

  flushtlb(gpu_priv);

  // Stop ring
  printk(LOG_INFO "stopping ring\n" LOG_END);

  RING_WRITE_CTL(RING, 0);
  RING_WRITE_HEAD(RING, 0);
  RING_WRITE_TAIL(RING, 0);

  // The doc says this enforces ordering between multiple writes
  head = RING_READ_HEAD(RING) & RING_HEAD_ADDR;
  if (head != 0) {
    printk(LOG_ERR "failed to set head to zero\n" LOG_END);
    RING_WRITE_HEAD(RING, 0);

    if (RING_READ_HEAD(RING) & RING_HEAD_ADDR) {
      printk(LOG_ERR "failed to set ring head to zero "
                     "ctl %08x head %08x tail %08x start %08x\n"
             LOG_END,
             RING_READ_CTL(RING),
             RING_READ_HEAD(RING),
             RING_READ_TAIL(RING),
             RING_READ_START(RING));
    }
  }

  /* i915 driver says the below line...?? */
  /* Enforce ordering by reading HEAD register back */
  RING_READ_HEAD(RING);

  /* Comment taken directly from i915 driver */
  /* Initialize the ring. This must happen _after_ we've cleared the ring
   * registers with the above sequence (the readback of the HEAD registers
   * also enforces ordering), otherwise the hw might lose the new ring
   * register values. */
  RING_WRITE_START(RING, RING->gva_ringbuffer);

  RING_WRITE_CTL(RING, (((RING->size - PAGE_SIZE) &
                          RING_NR_PAGES) |
                          RING_VALID));

  /* If the head is still not zero, the ring is dead */
  if (wait_for((RING_READ_CTL(RING) & RING_VALID) != 0 &&
               RING_READ_START(RING) == RING->gva_ringbuffer &&
               (RING_READ_HEAD(RING) & RING_HEAD_ADDR) == 0, 50)) {
    printk(LOG_ERR "failed to start ring\n" LOG_END);
    return -EIO;
  }

  RING->head = RING_READ_HEAD(RING);
  RING->tail = RING_READ_TAIL(RING) & RING_TAIL_ADDR;
  RING->space = ring_space(RING);

  printk(LOG_INFO "ring->space = %d\n" LOG_END, RING->space);

  gpu_forcewake_put(gpu_priv);

  RING_WRITE_MODE(RING, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
  RING_WRITE_MODE(RING, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
  RING_WRITE_MODE_GEN7(RING, _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
                       _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
  RING_WRITE_INSTPM(RING, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

  dword_check(gpu_priv, RING, temp);

  TRACE_OUT
  return 0;
}