Example no. 1
0
/**
 * Actually destroy the buffer.
 */
static INLINE void
_pb_cache_buffer_destroy(struct pb_cache_buffer *buf)
{
   struct pb_cache_manager *mgr = buf->mgr;

   LIST_DEL(&buf->head);
   assert(mgr->numDelayed);
   --mgr->numDelayed;
   assert(!pipe_is_referenced(&buf->base.base.reference));
   pb_reference(&buf->buffer, NULL);
   FREE(buf);
}
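Examples 1 through 5 (and most snippets below) rely on the same Gallium release idiom: pb_reference(&ptr, NULL) drops whatever *ptr currently references, destroying the buffer once its last reference is gone, and leaves the pointer NULL. The following is a minimal, self-contained model of that counted-pointer behaviour; the toy_* names are hypothetical and this is only an illustrative sketch, not the actual Mesa pipebuffer implementation.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for struct pb_buffer: just a reference count. */
struct toy_buffer {
   int refcount;
};

static void toy_buffer_destroy(struct toy_buffer *buf)
{
   printf("destroying %p\n", (void *)buf);
   free(buf);
}

/* Model of pb_reference(dst, src): retain src, release the old *dst,
 * and make *dst point at src. Passing src == NULL only releases. */
static void toy_reference(struct toy_buffer **dst, struct toy_buffer *src)
{
   if (src)
      src->refcount++;
   if (*dst && --(*dst)->refcount == 0)
      toy_buffer_destroy(*dst);
   *dst = src;
}

int main(void)
{
   struct toy_buffer *buf = calloc(1, sizeof(*buf));
   struct toy_buffer *owner = NULL;

   buf->refcount = 1;             /* the creator holds the first reference */
   toy_reference(&owner, buf);    /* a second owner: refcount == 2         */
   toy_reference(&buf, NULL);     /* the creator drops its reference       */
   toy_reference(&owner, NULL);   /* last reference gone: buffer destroyed */
   return 0;
}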
Example no. 2
0
static void r600_texture_destroy(struct pipe_screen *screen,
				 struct pipe_resource *ptex)
{
	struct r600_resource_texture *rtex = (struct r600_resource_texture*)ptex;
	struct si_resource *resource = &rtex->resource;

	if (rtex->flushed_depth_texture)
		si_resource_reference((struct si_resource **)&rtex->flushed_depth_texture, NULL);

	pb_reference(&resource->buf, NULL);
	FREE(rtex);
}
Example no. 3
0
static void r300_buffer_destroy(struct pipe_screen *screen,
				struct pipe_resource *buf)
{
    struct r300_resource *rbuf = r300_resource(buf);

    FREE(rbuf->malloced_buffer);

    if (rbuf->buf)
        pb_reference(&rbuf->buf, NULL);

    FREE(rbuf);
}
Example no. 4
0
static void
pb_debug_buffer_destroy(struct pb_buffer *_buf)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);  
   
   assert(!buf->base.base.refcount);
   
   pb_debug_buffer_check(buf);

   pb_reference(&buf->buffer, NULL);
   FREE(buf);
}
Example no. 5
0
static void r300_buffer_destroy(struct pipe_screen *screen,
				struct pipe_resource *buf)
{
    struct r300_screen *r300screen = r300_screen(screen);
    struct r300_resource *rbuf = r300_resource(buf);

    if (rbuf->constant_buffer)
        FREE(rbuf->constant_buffer);

    if (rbuf->buf)
        pb_reference(&rbuf->buf, NULL);

    util_slab_free(&r300screen->pool_buffers, rbuf);
}
Example no. 6
0
int mappedfile_setfile(struct vmproc *owner,
	struct vir_region *region, int fd, u64_t offset,
	dev_t dev, ino_t ino, u16_t clearend, int prefill, int mayclosefd)
{
	vir_bytes vaddr;
	struct fdref *newref;

	newref = fdref_dedup_or_new(owner, ino, dev, fd, mayclosefd);

	assert(newref);
	assert(!region->param.file.inited);
	assert(dev != NO_DEV);
	fdref_ref(newref, region);
	region->param.file.offset = offset;
	region->param.file.clearend = clearend;
	region->param.file.inited = 1;

	if(!prefill) return OK;

	for(vaddr = 0; vaddr < region->length; vaddr+=VM_PAGE_SIZE) {
		struct cached_page *cp = NULL;
		struct phys_region *pr;
		u64_t referenced_offset = offset + vaddr;

		if(roundup(vaddr+region->param.file.clearend,
			VM_PAGE_SIZE) >= region->length) {
			break;
		}

		if(ino == VMC_NO_INODE) {
			cp = find_cached_page_bydev(dev, referenced_offset,
			  	VMC_NO_INODE, 0, 1);
		} else {
			cp = find_cached_page_byino(dev, ino,
				referenced_offset, 1);
		}
		if(!cp) continue;
		if(!(pr = pb_reference(cp->page, vaddr, region,
			&mem_type_mappedfile))) {
			printf("mappedfile_setfile: pb_reference failed\n");
			break;
		}
		if(map_ph_writept(region->parent, region, pr) != OK) {
			printf("mappedfile_setfile: map_ph_writept failed\n");
			break;
		}
	}

	return OK;
}
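Note that the pb_reference called here (and in Example 13 below) is unrelated to the Gallium buffer helper used in the other examples: it is the MINIX VM routine that attaches a struct phys_block to a struct vir_region at a given offset and returns the resulting struct phys_region, or NULL on failure, which is why these callers test its return value instead of passing a pointer-to-pointer. Judging from the call sites, its signature is roughly the following (a reconstruction from the usage above, not verified against the MINIX headers):

struct phys_region *pb_reference(struct phys_block *pb, vir_bytes offset,
	struct vir_region *region, struct mem_type *memtype);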
Example no. 7
0
static void
pb_cache_buffer_destroy(struct pb_buffer *_buf)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);   
   struct pb_cache_manager *mgr = buf->mgr;

   if (!mgr) {
      pb_reference(&buf->buffer, NULL);
      FREE(buf);
      return;
   }

   pb_cache_add_buffer(&buf->cache_entry);
}
Example no. 8
0
enum pipe_error
pb_validate_add_buffer(struct pb_validate *vl,
                       struct pb_buffer *buf,
                       unsigned flags)
{
   assert(buf);
   if(!buf)
      return PIPE_ERROR;

   assert(flags & PIPE_BUFFER_USAGE_GPU_READ_WRITE);
   assert(!(flags & ~PIPE_BUFFER_USAGE_GPU_READ_WRITE));
   flags &= PIPE_BUFFER_USAGE_GPU_READ_WRITE;

   /* We only need to store one reference for each buffer, so avoid storing
    * consecutive references for the same buffer. It might not be the most 
    * common pattern, but it is easy to implement.
    */
   if(vl->used && vl->entries[vl->used - 1].buf == buf) {
      vl->entries[vl->used - 1].flags |= flags;
      return PIPE_OK;
   }
   
   /* Grow the table */
   if(vl->used == vl->size) {
      unsigned new_size;
      struct pb_validate_entry *new_entries;
      
      new_size = vl->size * 2;
      if(!new_size)
	 return PIPE_ERROR_OUT_OF_MEMORY;

      new_entries = (struct pb_validate_entry *)REALLOC(vl->entries,
                                                        vl->size*sizeof(struct pb_validate_entry),
                                                        new_size*sizeof(struct pb_validate_entry));
      if(!new_entries)
         return PIPE_ERROR_OUT_OF_MEMORY;
      
      memset(new_entries + vl->size, 0, (new_size - vl->size)*sizeof(struct pb_validate_entry));
      
      vl->size = new_size;
      vl->entries = new_entries;
   }
   
   assert(!vl->entries[vl->used].buf);
   pb_reference(&vl->entries[vl->used].buf, buf);
   vl->entries[vl->used].flags = flags;
   ++vl->used;
   
   return PIPE_OK;
}
Example no. 9
0
static unsigned amdgpu_cs_get_buffer_list(struct radeon_winsys_cs *rcs,
                                          struct radeon_bo_list_item *list)
{
    struct amdgpu_cs *cs = amdgpu_cs(rcs);
    int i;

    if (list) {
        for (i = 0; i < cs->num_buffers; i++) {
            pb_reference(&list[i].buf, &cs->buffers[i].bo->base);
            list[i].vm_address = cs->buffers[i].bo->va;
            list[i].priority_usage = cs->buffers[i].priority_usage;
        }
    }
    return cs->num_buffers;
}
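Example no. 20 below shows the intended calling convention for this hook: invoke it once with list == NULL to learn how many entries to allocate, then again with the allocated array so it can take a reference on each buffer. A hedged sketch of that two-step usage, assuming the winsys vtable wiring used in Example no. 20 (error handling omitted):

unsigned i, count;
struct radeon_bo_list_item *list;

count = ws->cs_get_buffer_list(cs, NULL);   /* first call: just the count    */
list = calloc(count, sizeof(list[0]));      /* zeroed, so .buf starts NULL   */
ws->cs_get_buffer_list(cs, list);           /* second call: takes references */
/* ... inspect the list, then drop every reference it took: */
for (i = 0; i < count; i++)
	pb_reference(&list[i].buf, NULL);
free(list);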
Example no. 10
0
static void
pool_bufmgr_destroy(struct pb_manager *mgr)
{
   struct pool_pb_manager *pool = pool_pb_manager(mgr);
   pipe_mutex_lock(pool->mutex);

   FREE(pool->bufs);
   
   pb_unmap(pool->buffer);
   pb_reference(&pool->buffer, NULL);
   
   pipe_mutex_unlock(pool->mutex);
   
   FREE(mgr);
}
Example no. 11
0
static void
mm_bufmgr_destroy(struct pb_manager *mgr)
{
   struct mm_pb_manager *mm = mm_pb_manager(mgr);
   
   mtx_lock(&mm->mutex);

   u_mmDestroy(mm->heap);
   
   pb_unmap(mm->buffer);
   pb_reference(&mm->buffer, NULL);
   
   mtx_unlock(&mm->mutex);
   
   FREE(mgr);
}
Example no. 12
0
static void r600_texture_destroy(struct pipe_screen *screen,
				 struct pipe_resource *ptex)
{
	struct r600_texture *rtex = (struct r600_texture*)ptex;
	struct r600_resource *resource = &rtex->resource;

	if (rtex->flushed_depth_texture)
		pipe_resource_reference((struct pipe_resource **)&rtex->flushed_depth_texture, NULL);

	pipe_resource_reference((struct pipe_resource**)&rtex->htile_buffer, NULL);
	if (rtex->cmask_buffer != &rtex->resource) {
	    pipe_resource_reference((struct pipe_resource**)&rtex->cmask_buffer, NULL);
	}
	pb_reference(&resource->buf, NULL);
	FREE(rtex);
}
Example no. 13
0
static int anon_contig_new(struct vir_region *region)
{
        u32_t allocflags;
	phys_bytes new_pages, new_page_cl, cur_ph;
	phys_bytes p, pages;

        allocflags = vrallocflags(region->flags);

	pages = region->length/VM_PAGE_SIZE;

	assert(physregions(region) == 0);

	for(p = 0; p < pages; p++) {
		struct phys_block *pb = pb_new(MAP_NONE);
		struct phys_region *pr = NULL;
		if(pb)
			pr = pb_reference(pb, p * VM_PAGE_SIZE, region, &mem_type_anon_contig);
		if(!pr) {
			if(pb) pb_free(pb);
			map_free(region);
			return ENOMEM;
		}
	}

	assert(physregions(region) == pages);

	if((new_page_cl = alloc_mem(pages, allocflags)) == NO_MEM) {
		map_free(region);
		return ENOMEM;
	}

	cur_ph = new_pages = CLICK2ABS(new_page_cl);

	for(p = 0; p < pages; p++) {
		struct phys_region *pr = physblock_get(region, p * VM_PAGE_SIZE);
		assert(pr);
		assert(pr->ph);
		assert(pr->ph->phys == MAP_NONE);
		assert(pr->offset == p * VM_PAGE_SIZE);
		pr->ph->phys = cur_ph + pr->offset;
	}

	return OK;
}
Example no. 14
0
/**
 * Actually destroy the buffer.
 */
static INLINE void
_fenced_buffer_destroy(struct fenced_buffer *fenced_buf)
{
   struct fenced_buffer_list *fenced_list = fenced_buf->list;
   
   assert(!pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(!fenced_buf->fence);
#ifdef DEBUG
   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);
   LIST_DEL(&fenced_buf->head);
   assert(fenced_list->numUnfenced);
   --fenced_list->numUnfenced;
#else
   (void)fenced_list;
#endif
   pb_reference(&fenced_buf->buffer, NULL);
   FREE(fenced_buf);
}
Example no. 15
0
enum pipe_error
pb_validate_add_buffer(struct pb_validate *vl,
                       struct pb_buffer *buf)
{
    assert(buf);
    if(!buf)
        return PIPE_ERROR;

    /* We only need to store one reference for each buffer, so avoid storing
     * consecutive references for the same buffer. It might not be the most
     * common pattern, but it is easy to implement.
     */
    if(vl->used && vl->buffers[vl->used - 1] == buf) {
        return PIPE_OK;
    }

    /* Grow the table */
    if(vl->used == vl->size) {
        unsigned new_size;
        struct pb_buffer **new_buffers;

        new_size = vl->size * 2;
        if(!new_size)
            return PIPE_ERROR_OUT_OF_MEMORY;

        new_buffers = (struct pb_buffer **)REALLOC(vl->buffers,
        vl->size*sizeof(struct pb_buffer *),
        new_size*sizeof(struct pb_buffer *));
        if(!new_buffers)
            return PIPE_ERROR_OUT_OF_MEMORY;

        memset(new_buffers + vl->size, 0, (new_size - vl->size)*sizeof(struct pb_buffer *));

        vl->size = new_size;
        vl->buffers = new_buffers;
    }

    assert(!vl->buffers[vl->used]);
    pb_reference(&vl->buffers[vl->used], buf);
    ++vl->used;

    return PIPE_OK;
}
Example no. 16
0
static void
pb_debug_buffer_destroy(struct pb_buffer *_buf)
{
   struct pb_debug_buffer *buf = pb_debug_buffer(_buf);
   struct pb_debug_manager *mgr = buf->mgr;
   
   assert(!pipe_is_referenced(&buf->base.base.reference));
   
   pb_debug_buffer_check(buf);

   pipe_mutex_lock(mgr->mutex);
   LIST_DEL(&buf->head);
   pipe_mutex_unlock(mgr->mutex);

   pipe_mutex_destroy(buf->mutex);
   
   pb_reference(&buf->buffer, NULL);
   FREE(buf);
}
Example no. 17
0
static void r300_end_query(struct pipe_context* pipe,
	                   struct pipe_query* query)
{
    struct r300_context* r300 = r300_context(pipe);
    struct r300_query *q = r300_query(query);

    if (q->type == PIPE_QUERY_GPU_FINISHED) {
        pb_reference(&q->buf, NULL);
        r300_flush(pipe, RADEON_FLUSH_ASYNC,
                   (struct pipe_fence_handle**)&q->buf);
        return;
    }

    if (q != r300->query_current) {
        fprintf(stderr, "r300: end_query: Got invalid query.\n");
        assert(0);
        return;
    }

    r300_stop_query(r300);
}
Example no. 18
0
bool r600_alloc_resource(struct r600_common_screen *rscreen,
			 struct r600_resource *res)
{
	struct pb_buffer *old_buf, *new_buf;

	/* Allocate a new resource. */
	new_buf = rscreen->ws->buffer_create(rscreen->ws, res->bo_size,
					     res->bo_alignment,
					     res->domains, res->flags);
	if (!new_buf) {
		return false;
	}

	/* Replace the pointer such that if res->buf wasn't NULL, it won't be
	 * NULL. This should prevent crashes with multiple contexts using
	 * the same buffer where one of the contexts invalidates it while
	 * the others are using it. */
	old_buf = res->buf;
	res->buf = new_buf; /* should be atomic */

	if (rscreen->info.has_virtual_memory)
		res->gpu_address = rscreen->ws->buffer_get_virtual_address(res->buf);
	else
		res->gpu_address = 0;

	pb_reference(&old_buf, NULL);

	util_range_set_empty(&res->valid_buffer_range);
	res->TC_L2_dirty = false;

	/* Print debug information. */
	if (rscreen->debug_flags & DBG_VM && res->b.b.target == PIPE_BUFFER) {
		fprintf(stderr, "VM start=0x%"PRIX64"  end=0x%"PRIX64" | Buffer %"PRIu64" bytes\n",
			res->gpu_address, res->gpu_address + res->buf->size,
			res->buf->size);
	}
	return true;
}
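The old_buf/new_buf dance above (also used in Examples no. 26 and no. 29) keeps res->buf valid at every instant: the replacement is published before the stale buffer is released, so another context reading res->buf never observes NULL. Condensed, the ordering is (a restatement of the lines above, not extra API):

old_buf = res->buf;            /* remember the buffer being replaced  */
res->buf = new_buf;            /* publish the replacement first       */
pb_reference(&old_buf, NULL);  /* only then drop the stale reference  */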
Example no. 19
0
/**
 * Join surfaces into the same buffer with identical tiling params,
 * sum up their sizes and replace the backend buffers with a single bo.
 */
void rvid_join_surfaces(struct radeon_winsys* ws,
			struct pb_buffer** buffers[VL_NUM_COMPONENTS],
			struct radeon_surf *surfaces[VL_NUM_COMPONENTS])
{
	unsigned best_tiling, best_wh, off;
	unsigned size, alignment;
	struct pb_buffer *pb;
	unsigned i, j;

	for (i = 0, best_tiling = 0, best_wh = ~0; i < VL_NUM_COMPONENTS; ++i) {
		unsigned wh;

		if (!surfaces[i])
			continue;

		/* choose the smallest bank w/h for now */
		wh = surfaces[i]->bankw * surfaces[i]->bankh;
		if (wh < best_wh) {
			best_wh = wh;
			best_tiling = i;
		}
	}

	for (i = 0, off = 0; i < VL_NUM_COMPONENTS; ++i) {
		if (!surfaces[i])
			continue;

		/* copy the tiling parameters */
		surfaces[i]->bankw = surfaces[best_tiling]->bankw;
		surfaces[i]->bankh = surfaces[best_tiling]->bankh;
		surfaces[i]->mtilea = surfaces[best_tiling]->mtilea;
		surfaces[i]->tile_split = surfaces[best_tiling]->tile_split;

		/* adjust the texture layer offsets */
		off = align(off, surfaces[i]->surf_alignment);
		for (j = 0; j < ARRAY_SIZE(surfaces[i]->level); ++j)
			surfaces[i]->level[j].offset += off;
		off += surfaces[i]->surf_size;
	}

	for (i = 0, size = 0, alignment = 0; i < VL_NUM_COMPONENTS; ++i) {
		if (!buffers[i] || !*buffers[i])
			continue;

		size = align(size, (*buffers[i])->alignment);
		size += (*buffers[i])->size;
		alignment = MAX2(alignment, (*buffers[i])->alignment * 1);
	}

	if (!size)
		return;

	/* TODO: 2D tiling workaround */
	alignment *= 2;

	pb = ws->buffer_create(ws, size, alignment, RADEON_DOMAIN_VRAM, 0);
	if (!pb)
		return;

	for (i = 0; i < VL_NUM_COMPONENTS; ++i) {
		if (!buffers[i] || !*buffers[i])
			continue;

		pb_reference(buffers[i], pb);
	}

	pb_reference(&pb, NULL);
}
Example no. 20
0
void si_context_gfx_flush(void *context, unsigned flags,
			  struct pipe_fence_handle **fence)
{
	struct si_context *ctx = context;
	struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
	struct radeon_winsys *ws = ctx->b.ws;

	if (ctx->gfx_flush_in_progress)
		return;

	ctx->gfx_flush_in_progress = true;

	if (cs->cdw == ctx->b.initial_gfx_cs_size &&
	    (!fence || ctx->last_gfx_fence)) {
		if (fence)
			ws->fence_reference(fence, ctx->last_gfx_fence);
		if (!(flags & RADEON_FLUSH_ASYNC))
			ws->cs_sync_flush(cs);
		ctx->gfx_flush_in_progress = false;
		return;
	}

	r600_preflush_suspend_features(&ctx->b);

	ctx->b.flags |= SI_CONTEXT_FLUSH_AND_INV_FRAMEBUFFER |
			SI_CONTEXT_INV_VMEM_L1 |
			SI_CONTEXT_INV_GLOBAL_L2 |
			/* this is probably not needed anymore */
			SI_CONTEXT_PS_PARTIAL_FLUSH;
	si_emit_cache_flush(ctx, NULL);

	/* force to keep tiling flags */
	flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;

	if (ctx->trace_buf)
		si_trace_emit(ctx);

	if (ctx->is_debug) {
		unsigned i;

		/* Save the IB for debug contexts. */
		free(ctx->last_ib);
		ctx->last_ib_dw_size = cs->cdw;
		ctx->last_ib = malloc(cs->cdw * 4);
		memcpy(ctx->last_ib, cs->buf, cs->cdw * 4);
		r600_resource_reference(&ctx->last_trace_buf, ctx->trace_buf);
		r600_resource_reference(&ctx->trace_buf, NULL);

		/* Save the buffer list. */
		if (ctx->last_bo_list) {
			for (i = 0; i < ctx->last_bo_count; i++)
				pb_reference(&ctx->last_bo_list[i].buf, NULL);
			free(ctx->last_bo_list);
		}
		ctx->last_bo_count = ws->cs_get_buffer_list(cs, NULL);
		ctx->last_bo_list = calloc(ctx->last_bo_count,
					   sizeof(ctx->last_bo_list[0]));
		ws->cs_get_buffer_list(cs, ctx->last_bo_list);
	}

	/* Flush the CS. */
	ws->cs_flush(cs, flags, &ctx->last_gfx_fence,
		     ctx->screen->b.cs_count++);

	if (fence)
		ws->fence_reference(fence, ctx->last_gfx_fence);

	/* Check VM faults if needed. */
	if (ctx->screen->b.debug_flags & DBG_CHECK_VM)
		si_check_vm_faults(ctx);

	si_begin_new_cs(ctx);
	ctx->gfx_flush_in_progress = false;
}
Example no. 21
0
static void *r600_buffer_transfer_map(struct pipe_context *ctx,
                                      struct pipe_resource *resource,
                                      unsigned level,
                                      unsigned usage,
                                      const struct pipe_box *box,
				      struct pipe_transfer **ptransfer)
{
	struct r600_context *rctx = (struct r600_context*)ctx;
	struct r600_resource *rbuffer = r600_resource(resource);
	uint8_t *data;

	assert(box->x + box->width <= resource->width0);

	if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
	    !(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
		assert(usage & PIPE_TRANSFER_WRITE);

		/* Check if mapping this buffer would cause waiting for the GPU. */
		if (rctx->ws->cs_is_buffer_referenced(rctx->cs, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
		    rctx->ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) {
			unsigned i, mask;

			/* Discard the buffer. */
			pb_reference(&rbuffer->buf, NULL);

			/* Create a new one in the same pipe_resource. */
			/* XXX We probably want a different alignment for buffers and textures. */
			r600_init_resource(rctx->screen, rbuffer, rbuffer->b.b.width0, 4096,
					   rbuffer->b.b.bind, rbuffer->b.b.usage);

			/* We changed the buffer, now we need to bind it where the old one was bound. */
			/* Vertex buffers. */
			mask = rctx->vertex_buffer_state.enabled_mask;
			while (mask) {
				i = u_bit_scan(&mask);
				if (rctx->vertex_buffer_state.vb[i].buffer == &rbuffer->b.b) {
					rctx->vertex_buffer_state.dirty_mask |= 1 << i;
					r600_vertex_buffers_dirty(rctx);
				}
			}
			/* Streamout buffers. */
			for (i = 0; i < rctx->num_so_targets; i++) {
				if (rctx->so_targets[i]->b.buffer == &rbuffer->b.b) {
					r600_context_streamout_end(rctx);
					rctx->streamout_start = TRUE;
					rctx->streamout_append_bitmask = ~0;
				}
			}
			/* Constant buffers. */
			r600_set_constants_dirty_if_bound(rctx, rbuffer);
		}
	}
	else if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
		 !(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
		 rctx->screen->has_streamout &&
		 /* The buffer range must be aligned to 4. */
		 box->x % 4 == 0 && box->width % 4 == 0) {
		assert(usage & PIPE_TRANSFER_WRITE);

		/* Check if mapping this buffer would cause waiting for the GPU. */
		if (rctx->ws->cs_is_buffer_referenced(rctx->cs, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
		    rctx->ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) {
			/* Do a wait-free write-only transfer using a temporary buffer. */
			struct r600_resource *staging = (struct r600_resource*)
				pipe_buffer_create(ctx->screen, PIPE_BIND_VERTEX_BUFFER,
						   PIPE_USAGE_STAGING,
						   box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT));
			data = rctx->ws->buffer_map(staging->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);

			if (!data)
				return NULL;

			data += box->x % R600_MAP_BUFFER_ALIGNMENT;
			return r600_buffer_get_transfer(ctx, resource, level, usage, box,
							ptransfer, data, staging);
		}
	}

	data = rctx->ws->buffer_map(rbuffer->cs_buf, rctx->cs, usage);
	if (!data) {
		return NULL;
	}
	data += box->x;

	return r600_buffer_get_transfer(ctx, resource, level, usage, box,
					ptransfer, data, NULL);
}
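Examples no. 21, 27, 28 and 30 all implement the same buffer-invalidation strategy for PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE: if mapping the current bo would stall on the GPU, the driver drops it with pb_reference(&rbuffer->buf, NULL), allocates a fresh bo inside the same pipe_resource, and re-dirties every binding point that referenced the old storage. In outline (a sketch of the control flow above, not additional API; buffer_is_busy_on_gpu stands for the cs_is_buffer_referenced/buffer_is_busy checks):

if ((usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) &&
    !(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
    buffer_is_busy_on_gpu) {
	pb_reference(&rbuffer->buf, NULL);   /* drop the busy bo               */
	r600_init_resource(...);             /* create a replacement in place  */
	/* re-bind: dirty vertex buffers, restart streamout, dirty constants   */
}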
Example no. 22
0
static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
                                                           struct winsys_handle *whandle,
                                                           unsigned *stride,
                                                           unsigned *size)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo *bo;
    struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
    struct drm_gem_open open_arg = {};

    /* We must maintain a list of pairs <handle, bo>, so that we always return
     * the same BO for one particular handle. If we didn't do that and created
     * more than one BO for the same handle and then relocated them in a CS,
     * we would hit a deadlock in the kernel.
     *
     * The list of pairs is guarded by a mutex, of course. */
    pipe_mutex_lock(mgr->bo_handles_mutex);

    /* First check if there already is an existing bo for the handle. */
    bo = util_hash_table_get(mgr->bo_handles, (void*)(uintptr_t)whandle->handle);
    if (bo) {
        /* Increase the refcount. */
        struct pb_buffer *b = NULL;
        pb_reference(&b, &bo->base);
        goto done;
    }

    /* There isn't, create a new one. */
    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo) {
        goto fail;
    }

    /* Open the BO. */
    open_arg.name = whandle->handle;
    if (drmIoctl(ws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
        FREE(bo);
        goto fail;
    }
    bo->handle = open_arg.handle;
    bo->size = open_arg.size;
    bo->name = whandle->handle;

    /* Initialize it. */
    pipe_reference_init(&bo->base.base.reference, 1);
    bo->base.base.alignment = 0;
    bo->base.base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
    bo->base.base.size = bo->size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    pipe_mutex_init(bo->map_mutex);

    util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)whandle->handle, bo);

done:
    pipe_mutex_unlock(mgr->bo_handles_mutex);

    if (stride)
        *stride = whandle->stride;
    if (size)
        *size = bo->base.base.size;

    return (struct pb_buffer*)bo;

fail:
    pipe_mutex_unlock(mgr->bo_handles_mutex);
    return NULL;
}
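The pair near the top of this function, struct pb_buffer *b = NULL; pb_reference(&b, &bo->base);, is how an extra reference is taken with this API: because the destination starts out NULL, nothing is released and the call only increments the refcount of the existing bo. The same idiom in isolation (a sketch assuming the usual pipebuffer declarations; tmp is a hypothetical name):

struct pb_buffer *tmp = NULL;   /* must start NULL so nothing gets released */
pb_reference(&tmp, &bo->base);  /* bo now holds one extra reference         */
/* ... hand tmp to another owner, or drop the extra reference again: */
pb_reference(&tmp, NULL);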
Example no. 23
0
static struct pb_buffer *
pb_cache_manager_create_buffer(struct pb_manager *_mgr, 
                               size_t size,
                               const struct pb_desc *desc)
{
   struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
   struct pb_cache_buffer *buf;
   struct pb_cache_buffer *curr_buf;
   struct list_head *curr, *next;
   struct util_time now;
   
   pipe_mutex_lock(mgr->mutex);

   buf = NULL;
   curr = mgr->delayed.next;
   next = curr->next;
   
   /* search in the expired buffers, freeing them in the process */
   util_time_get(&now);
   while(curr != &mgr->delayed) {
      curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
      if(!buf && pb_cache_is_buffer_compat(curr_buf, size, desc))
	 buf = curr_buf;
      else if(util_time_timeout(&curr_buf->start, &curr_buf->end, &now))
	 _pb_cache_buffer_destroy(curr_buf);
      else
         /* This buffer (and all hereafter) are still hot in cache */
         break;
      curr = next; 
      next = curr->next;
   }

   /* keep searching in the hot buffers */
   if(!buf) {
      while(curr != &mgr->delayed) {
         curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
         if(pb_cache_is_buffer_compat(curr_buf, size, desc)) {
            buf = curr_buf;
            break;
         }
         /* no need to check the timeout here */
         curr = next;
         next = curr->next;
      }
   }
   
   if(buf) {
      LIST_DEL(&buf->head);
      pipe_mutex_unlock(mgr->mutex);
      /* Increase refcount */
      pb_reference((struct pb_buffer**)&buf, &buf->base);
      return &buf->base;
   }
   
   pipe_mutex_unlock(mgr->mutex);

   buf = CALLOC_STRUCT(pb_cache_buffer);
   if(!buf)
      return NULL;
   
   buf->buffer = mgr->provider->create_buffer(mgr->provider, size, desc);
   if(!buf->buffer) {
      FREE(buf);
      return NULL;
   }
   
   assert(pipe_is_referenced(&buf->buffer->base.reference));
   assert(pb_check_alignment(desc->alignment, buf->buffer->base.alignment));
   assert(pb_check_usage(desc->usage, buf->buffer->base.usage));
   assert(buf->buffer->base.size >= size);
   
   pipe_reference_init(&buf->base.base.reference, 1);
   buf->base.base.alignment = buf->buffer->base.alignment;
   buf->base.base.usage = buf->buffer->base.usage;
   buf->base.base.size = buf->buffer->base.size;
   
   buf->base.vtbl = &pb_cache_buffer_vtbl;
   buf->mgr = mgr;
   
   return &buf->base;
}
Example no. 24
0
static void radeon_fence_reference(struct pipe_fence_handle **dst,
                                   struct pipe_fence_handle *src)
{
    pb_reference((struct pb_buffer**)dst, (struct pb_buffer*)src);
}
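This works because, in this winsys, a pipe_fence_handle is simply a pb_buffer behind an opaque pointer (Example no. 17 above casts &q->buf to struct pipe_fence_handle ** for exactly that reason), so fence retain/release can be delegated to pb_reference with casts. A sketch of a call through the vtable slot this function fills (hypothetical fence/other_fence variables, assuming ws->fence_reference is wired to radeon_fence_reference):

struct pipe_fence_handle *fence = NULL;
ws->fence_reference(&fence, other_fence);   /* retain: the underlying bo refcount goes up */
ws->fence_reference(&fence, NULL);          /* release it again                           */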
Example no. 25
0
struct pb_manager *
pool_bufmgr_create(struct pb_manager *provider, 
                   pb_size numBufs, 
                   pb_size bufSize,
                   const struct pb_desc *desc) 
{
   struct pool_pb_manager *pool;
   struct pool_buffer *pool_buf;
   pb_size i;

   if(!provider)
      return NULL;
   
   pool = CALLOC_STRUCT(pool_pb_manager);
   if (!pool)
      return NULL;

   pool->base.destroy = pool_bufmgr_destroy;
   pool->base.create_buffer = pool_bufmgr_create_buffer;
   pool->base.flush = pool_bufmgr_flush;

   LIST_INITHEAD(&pool->free);

   pool->numTot = numBufs;
   pool->numFree = numBufs;
   pool->bufSize = bufSize;
   pool->bufAlign = desc->alignment; 
   
   pipe_mutex_init(pool->mutex);

   pool->buffer = provider->create_buffer(provider, numBufs*bufSize, desc); 
   if (!pool->buffer)
      goto failure;

   pool->map = pb_map(pool->buffer,
                          PB_USAGE_CPU_READ |
                          PB_USAGE_CPU_WRITE, NULL);
   if(!pool->map)
      goto failure;

   pool->bufs = (struct pool_buffer *)CALLOC(numBufs, sizeof(*pool->bufs));
   if (!pool->bufs)
      goto failure;

   pool_buf = pool->bufs;
   for (i = 0; i < numBufs; ++i) {
      pipe_reference_init(&pool_buf->base.reference, 0);
      pool_buf->base.alignment = 0;
      pool_buf->base.usage = 0;
      pool_buf->base.size = bufSize;
      pool_buf->base.vtbl = &pool_buffer_vtbl;
      pool_buf->mgr = pool;
      pool_buf->start = i * bufSize;
      LIST_ADDTAIL(&pool_buf->head, &pool->free);
      pool_buf++;
   }

   return SUPER(pool);
   
failure:
   if(pool->bufs)
      FREE(pool->bufs);
   if(pool->map)
      pb_unmap(pool->buffer);
   if(pool->buffer)
      pb_reference(&pool->buffer, NULL);
   if(pool)
      FREE(pool);
   return NULL;
}
Example no. 26
0
bool r600_init_resource(struct r600_common_screen *rscreen,
			struct r600_resource *res,
			unsigned size, unsigned alignment,
			bool use_reusable_pool)
{
	struct r600_texture *rtex = (struct r600_texture*)res;
	struct pb_buffer *old_buf, *new_buf;

	switch (res->b.b.usage) {
	case PIPE_USAGE_STAGING:
	case PIPE_USAGE_DYNAMIC:
	case PIPE_USAGE_STREAM:
		/* Transfers are likely to occur more often with these resources. */
		res->domains = RADEON_DOMAIN_GTT;
		break;
	case PIPE_USAGE_DEFAULT:
	case PIPE_USAGE_IMMUTABLE:
	default:
		/* Not listing GTT here improves performance in some apps. */
		res->domains = RADEON_DOMAIN_VRAM;
		break;
	}

	/* Use GTT for all persistent mappings, because they are
	 * always cached and coherent. */
	if (res->b.b.target == PIPE_BUFFER &&
	    res->b.b.flags & (PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
			      PIPE_RESOURCE_FLAG_MAP_COHERENT)) {
		res->domains = RADEON_DOMAIN_GTT;
	}

	/* Tiled textures are unmappable. Always put them in VRAM. */
	if (res->b.b.target != PIPE_BUFFER &&
	    rtex->surface.level[0].mode >= RADEON_SURF_MODE_1D) {
		res->domains = RADEON_DOMAIN_VRAM;
	}

	/* Allocate a new resource. */
	new_buf = rscreen->ws->buffer_create(rscreen->ws, size, alignment,
					     use_reusable_pool,
					     res->domains);
	if (!new_buf) {
		return false;
	}

	/* Replace the pointer such that if res->buf wasn't NULL, it won't be
	 * NULL. This should prevent crashes with multiple contexts using
	 * the same buffer where one of the contexts invalidates it while
	 * the others are using it. */
	old_buf = res->buf;
	res->cs_buf = rscreen->ws->buffer_get_cs_handle(new_buf); /* should be atomic */
	res->buf = new_buf; /* should be atomic */
	pb_reference(&old_buf, NULL);

	util_range_set_empty(&res->valid_buffer_range);

	if (rscreen->debug_flags & DBG_VM && res->b.b.target == PIPE_BUFFER) {
		fprintf(stderr, "VM start=0x%"PRIu64"  end=0x%"PRIu64" | Buffer %u bytes\n",
			r600_resource_va(&rscreen->b, &res->b.b),
			r600_resource_va(&rscreen->b, &res->b.b) + res->buf->size,
			res->buf->size);
	}
	return true;
}
Example no. 27
0
static void *r600_buffer_transfer_map(struct pipe_context *ctx,
					struct pipe_resource *resource,
					unsigned level,
					unsigned usage,
					const struct pipe_box *box,
					struct pipe_transfer **ptransfer)
{
	struct r600_context *rctx = (struct r600_context*)ctx;
	struct r600_resource *rbuffer = r600_resource(resource);
	uint8_t *data;

	assert(box->x + box->width <= resource->width0);

	/* See if the buffer range being mapped has never been initialized,
	 * in which case it can be mapped unsynchronized. */
	if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
	    usage & PIPE_TRANSFER_WRITE &&
	    !util_ranges_intersect(&rbuffer->valid_buffer_range, box->x, box->x + box->width)) {
		usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
	}

	if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
	    !(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
		assert(usage & PIPE_TRANSFER_WRITE);

		/* Check if mapping this buffer would cause waiting for the GPU. */
		if (r600_rings_is_buffer_referenced(&rctx->b, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
		    rctx->b.ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) {
			unsigned i, mask;

			/* Discard the buffer. */
			pb_reference(&rbuffer->buf, NULL);

			/* Create a new one in the same pipe_resource. */
			/* XXX We probably want a different alignment for buffers and textures. */
			r600_init_resource(&rctx->screen->b, rbuffer, rbuffer->b.b.width0, 4096,
					   TRUE, rbuffer->b.b.usage);

			/* We changed the buffer, now we need to bind it where the old one was bound. */
			/* Vertex buffers. */
			mask = rctx->vertex_buffer_state.enabled_mask;
			while (mask) {
				i = u_bit_scan(&mask);
				if (rctx->vertex_buffer_state.vb[i].buffer == &rbuffer->b.b) {
					rctx->vertex_buffer_state.dirty_mask |= 1 << i;
					r600_vertex_buffers_dirty(rctx);
				}
			}
			/* Streamout buffers. */
			for (i = 0; i < rctx->b.streamout.num_targets; i++) {
				if (rctx->b.streamout.targets[i]->b.buffer == &rbuffer->b.b) {
					if (rctx->b.streamout.begin_emitted) {
						r600_emit_streamout_end(&rctx->b);
					}
					rctx->b.streamout.append_bitmask = rctx->b.streamout.enabled_mask;
					r600_streamout_buffers_dirty(&rctx->b);
				}
			}
			/* Constant buffers. */
			r600_set_constants_dirty_if_bound(rctx, rbuffer);
		}
	}
	else if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
		 !(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
		 !(rctx->screen->b.debug_flags & DBG_NO_DISCARD_RANGE) &&
		 (rctx->screen->has_cp_dma ||
		  (rctx->screen->has_streamout &&
		   /* The buffer range must be aligned to 4 with streamout. */
		   box->x % 4 == 0 && box->width % 4 == 0))) {
		assert(usage & PIPE_TRANSFER_WRITE);

		/* Check if mapping this buffer would cause waiting for the GPU. */
		if (r600_rings_is_buffer_referenced(&rctx->b, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
		    rctx->b.ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) {
			/* Do a wait-free write-only transfer using a temporary buffer. */
			unsigned offset;
			struct r600_resource *staging = NULL;

			u_upload_alloc(rctx->uploader, 0, box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT),
				       &offset, (struct pipe_resource**)&staging, (void**)&data);

			if (staging) {
				data += box->x % R600_MAP_BUFFER_ALIGNMENT;
				return r600_buffer_get_transfer(ctx, resource, level, usage, box,
								ptransfer, data, staging, offset);
			}
		}
	}

	/* mmap and synchronize with rings */
	data = r600_buffer_map_sync_with_rings(&rctx->b, rbuffer, usage);
	if (!data) {
		return NULL;
	}
	data += box->x;

	return r600_buffer_get_transfer(ctx, resource, level, usage, box,
					ptransfer, data, NULL, 0);
}
Example no. 28
0
static void *r600_buffer_transfer_map(struct pipe_context *pipe,
				      struct pipe_transfer *transfer)
{
	struct r600_resource *rbuffer = r600_resource(transfer->resource);
	struct r600_context *rctx = (struct r600_context*)pipe;
	uint8_t *data;

	if (transfer->usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
	    !(transfer->usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
		assert(transfer->usage & PIPE_TRANSFER_WRITE);

		/* Check if mapping this buffer would cause waiting for the GPU. */
		if (rctx->ws->cs_is_buffer_referenced(rctx->cs, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
		    rctx->ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) {
			unsigned i, mask;

			/* Discard the buffer. */
			pb_reference(&rbuffer->buf, NULL);

			/* Create a new one in the same pipe_resource. */
			/* XXX We probably want a different alignment for buffers and textures. */
			r600_init_resource(rctx->screen, rbuffer, rbuffer->b.b.width0, 4096,
					   rbuffer->b.b.bind, rbuffer->b.b.usage);

			/* We changed the buffer, now we need to bind it where the old one was bound. */
			/* Vertex buffers. */
			mask = rctx->vertex_buffer_state.enabled_mask;
			while (mask) {
				i = u_bit_scan(&mask);
				if (rctx->vertex_buffer_state.vb[i].buffer == &rbuffer->b.b) {
					rctx->vertex_buffer_state.dirty_mask |= 1 << i;
					r600_vertex_buffers_dirty(rctx);
				}
			}
			/* Streamout buffers. */
			for (i = 0; i < rctx->num_so_targets; i++) {
				if (rctx->so_targets[i]->b.buffer == &rbuffer->b.b) {
					r600_context_streamout_end(rctx);
					rctx->streamout_start = TRUE;
					rctx->streamout_append_bitmask = ~0;
				}
			}
			/* Constant buffers. */
			r600_set_constants_dirty_if_bound(rctx, &rctx->vs_constbuf_state, rbuffer);
			r600_set_constants_dirty_if_bound(rctx, &rctx->ps_constbuf_state, rbuffer);
		}
	}
#if 0 /* this is broken (see Bug 53130) */
	else if ((transfer->usage & PIPE_TRANSFER_DISCARD_RANGE) &&
		 !(transfer->usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
		 rctx->screen->has_streamout &&
		 /* The buffer range must be aligned to 4. */
		 transfer->box.x % 4 == 0 && transfer->box.width % 4 == 0) {
		assert(transfer->usage & PIPE_TRANSFER_WRITE);

		/* Check if mapping this buffer would cause waiting for the GPU. */
		if (rctx->ws->cs_is_buffer_referenced(rctx->cs, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
		    rctx->ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) {
			/* Do a wait-free write-only transfer using a temporary buffer. */
			struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;

			rtransfer->staging = (struct r600_resource*)
				pipe_buffer_create(pipe->screen, PIPE_BIND_VERTEX_BUFFER,
						   PIPE_USAGE_STAGING, transfer->box.width);
			return rctx->ws->buffer_map(rtransfer->staging->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
		}
	}
#endif

	data = rctx->ws->buffer_map(rbuffer->cs_buf, rctx->cs, transfer->usage);
	if (!data)
		return NULL;

	return (uint8_t*)data + transfer->box.x;
}
Example no. 29
0
bool r600_init_resource(struct r600_common_screen *rscreen,
			struct r600_resource *res,
			unsigned size, unsigned alignment,
			bool use_reusable_pool)
{
	struct r600_texture *rtex = (struct r600_texture*)res;
	struct pb_buffer *old_buf, *new_buf;
	enum radeon_bo_flag flags = 0;

	switch (res->b.b.usage) {
	case PIPE_USAGE_STREAM:
		flags = RADEON_FLAG_GTT_WC;
		/* fall through */
	case PIPE_USAGE_STAGING:
		/* Transfers are likely to occur more often with these resources. */
		res->domains = RADEON_DOMAIN_GTT;
		break;
	case PIPE_USAGE_DYNAMIC:
		/* Older kernels didn't always flush the HDP cache before
		 * CS execution
		 */
		if (rscreen->info.drm_major == 2 &&
		    rscreen->info.drm_minor < 40) {
			res->domains = RADEON_DOMAIN_GTT;
			flags |= RADEON_FLAG_GTT_WC;
			break;
		}
		flags |= RADEON_FLAG_CPU_ACCESS;
		/* fall through */
	case PIPE_USAGE_DEFAULT:
	case PIPE_USAGE_IMMUTABLE:
	default:
		/* Not listing GTT here improves performance in some apps. */
		res->domains = RADEON_DOMAIN_VRAM;
		flags |= RADEON_FLAG_GTT_WC;
		break;
	}

	if (res->b.b.target == PIPE_BUFFER &&
	    res->b.b.flags & (PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
			      PIPE_RESOURCE_FLAG_MAP_COHERENT)) {
		/* Use GTT for all persistent mappings with older kernels,
		 * because they didn't always flush the HDP cache before CS
		 * execution.
		 *
		 * Write-combined CPU mappings are fine, the kernel ensures all CPU
		 * writes finish before the GPU executes a command stream.
		 */
		if (rscreen->info.drm_major == 2 &&
		    rscreen->info.drm_minor < 40)
			res->domains = RADEON_DOMAIN_GTT;
		else if (res->domains & RADEON_DOMAIN_VRAM)
			flags |= RADEON_FLAG_CPU_ACCESS;
	}

	/* Tiled textures are unmappable. Always put them in VRAM. */
	if (res->b.b.target != PIPE_BUFFER &&
	    rtex->surface.level[0].mode >= RADEON_SURF_MODE_1D) {
		res->domains = RADEON_DOMAIN_VRAM;
		flags &= ~RADEON_FLAG_CPU_ACCESS;
		flags |= RADEON_FLAG_NO_CPU_ACCESS;
	}

	if (rscreen->debug_flags & DBG_NO_WC)
		flags &= ~RADEON_FLAG_GTT_WC;

	/* Allocate a new resource. */
	new_buf = rscreen->ws->buffer_create(rscreen->ws, size, alignment,
					     use_reusable_pool,
					     res->domains, flags);
	if (!new_buf) {
		return false;
	}

	/* Replace the pointer such that if res->buf wasn't NULL, it won't be
	 * NULL. This should prevent crashes with multiple contexts using
	 * the same buffer where one of the contexts invalidates it while
	 * the others are using it. */
	old_buf = res->buf;
	res->buf = new_buf; /* should be atomic */

	if (rscreen->info.has_virtual_memory)
		res->gpu_address = rscreen->ws->buffer_get_virtual_address(res->buf);
	else
		res->gpu_address = 0;

	pb_reference(&old_buf, NULL);

	util_range_set_empty(&res->valid_buffer_range);
	res->TC_L2_dirty = false;

	if (rscreen->debug_flags & DBG_VM && res->b.b.target == PIPE_BUFFER) {
		fprintf(stderr, "VM start=0x%"PRIX64"  end=0x%"PRIX64" | Buffer %u bytes\n",
			res->gpu_address, res->gpu_address + res->buf->size,
			res->buf->size);
	}
	return true;
}
Example no. 30
0
static void *
r300_buffer_transfer_map( struct pipe_context *context,
                          struct pipe_resource *resource,
                          unsigned level,
                          unsigned usage,
                          const struct pipe_box *box,
                          struct pipe_transfer **ptransfer )
{
    struct r300_context *r300 = r300_context(context);
    struct radeon_winsys *rws = r300->screen->rws;
    struct r300_resource *rbuf = r300_resource(resource);
    struct pipe_transfer *transfer;
    uint8_t *map;

    transfer = util_slab_alloc(&r300->pool_transfers);
    transfer->resource = resource;
    transfer->level = level;
    transfer->usage = usage;
    transfer->box = *box;
    transfer->stride = 0;
    transfer->layer_stride = 0;

    if (rbuf->malloced_buffer) {
        *ptransfer = transfer;
        return rbuf->malloced_buffer + box->x;
    }

    if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
        !(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
        assert(usage & PIPE_TRANSFER_WRITE);

        /* Check if mapping this buffer would cause waiting for the GPU. */
        if (r300->rws->cs_is_buffer_referenced(r300->cs, rbuf->cs_buf, RADEON_USAGE_READWRITE) ||
            !r300->rws->buffer_wait(rbuf->buf, 0, RADEON_USAGE_READWRITE)) {
            unsigned i;
            struct pb_buffer *new_buf;

            /* Create a new one in the same pipe_resource. */
            new_buf = r300->rws->buffer_create(r300->rws, rbuf->b.b.width0,
                                               R300_BUFFER_ALIGNMENT, TRUE,
                                               rbuf->domain, 0);
            if (new_buf) {
                /* Discard the old buffer. */
                pb_reference(&rbuf->buf, NULL);
                rbuf->buf = new_buf;
                rbuf->cs_buf = r300->rws->buffer_get_cs_handle(rbuf->buf);

                /* We changed the buffer, now we need to bind it where the old one was bound. */
                for (i = 0; i < r300->nr_vertex_buffers; i++) {
                    if (r300->vertex_buffer[i].buffer == &rbuf->b.b) {
                        r300->vertex_arrays_dirty = TRUE;
                        break;
                    }
                }
            }
        }
    }

    /* Buffers are never used for write, therefore mapping for read can be
     * unsynchronized. */
    if (!(usage & PIPE_TRANSFER_WRITE)) {
       usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
    }

    map = rws->buffer_map(rbuf->cs_buf, r300->cs, usage);

    if (map == NULL) {
        util_slab_free(&r300->pool_transfers, transfer);
        return NULL;
    }

    *ptransfer = transfer;
    return map + box->x;
}