Example #1
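/* Decompresses a depth texture: on first use, radeon_bo() allocates an
 * uncompressed BO, and a blit then uncompresses the depth data into it. */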
int r600_texture_from_depth(struct pipe_context *ctx, struct r600_resource_texture *rtexture, unsigned level)
{
	struct r600_screen *rscreen = r600_screen(ctx->screen);
	int r;

	if (!rtexture->depth) {
		/* This shouldn't happen; maybe print a warning. */
		return 0;
	}
	if (rtexture->uncompressed && !rtexture->dirty) {
		/* Uncompressed bo already in good state */
		return 0;
	}

	/* allocate uncompressed texture */
	if (rtexture->uncompressed == NULL) {
		rtexture->uncompressed = radeon_bo(rscreen->rw, 0, rtexture->size, 4096, NULL);
		if (rtexture->uncompressed == NULL) {
			return -ENOMEM;
		}
	}

	/* Render a rectangle covering the whole buffer to uncompress the depth data. */
	r = r600_blit_uncompress_depth(ctx, rtexture, level);
	if (r) {
		return r;
	}

	rtexture->dirty = 0;
	return 0;
}
Example #2
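/* Creates a new r600 texture resource: r600_setup_miptree() lays out the
 * miptree and radeon_bo() then allocates the backing buffer object. */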
struct pipe_resource *r600_texture_create(struct pipe_screen *screen,
						const struct pipe_resource *templ)
{
	struct r600_resource_texture *rtex;
	struct r600_resource *resource;
	struct r600_screen *rscreen = r600_screen(screen);

	rtex = CALLOC_STRUCT(r600_resource_texture);
	if (!rtex) {
		return NULL;
	}
	resource = &rtex->resource;
	resource->base.b = *templ;
	resource->base.vtbl = &r600_texture_vtbl;
	pipe_reference_init(&resource->base.b.reference, 1);
	resource->base.b.screen = screen;
	r600_setup_miptree(rscreen, rtex);

	/* FIXME: is an alignment of 4096 enough? Too much? */
	resource->domain = r600_domain_from_usage(resource->base.b.bind);
	resource->bo = radeon_bo(rscreen->rw, 0, rtex->size, 4096, NULL);
	if (resource->bo == NULL) {
		FREE(rtex);
		return NULL;
	}
	return &resource->base.b;
}
Example #3
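/* Here radeon_bo() acts as the pb_buffer downcast: the helper returns the
 * underlying radeon_bo, unwrapping to the base buffer when the pb_buffer
 * is not a radeon_bo itself. */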
static struct radeon_bo *get_radeon_bo(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = NULL;

    if (_buf->vtbl == &radeon_bo_vtbl) {
        bo = radeon_bo(_buf);
    } else {
        struct pb_buffer *base_buf;
        pb_size offset;
        pb_get_base_buffer(_buf, &base_buf, &offset);

        if (base_buf->vtbl == &radeon_bo_vtbl)
            bo = radeon_bo(base_buf);
    }

    return bo;
}
Example #4
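/* Busy check used by the cache buffer manager: a buffer still referenced by
 * any command stream, or still being used by the GPU, is reported as busy. */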
/* This is for the cache bufmgr. */
static boolean radeon_bomgr_is_buffer_busy(struct pb_manager *_mgr,
                                           struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);

    if (radeon_bo_is_referenced_by_any_cs(bo)) {
        return TRUE;
    }

    if (radeon_bo_is_busy((struct pb_buffer*)bo, RADEON_USAGE_READWRITE)) {
        return TRUE;
    }

    return FALSE;
}
Example #5
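/* Wraps a shared buffer in a texture: radeon_bo() is passed the winsys handle,
 * so it presumably opens the existing BO rather than allocating a new one, and
 * the texture layout is taken from the handle's stride instead of a miptree. */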
struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen,
					       const struct pipe_resource *templ,
					       struct winsys_handle *whandle)
{
	struct radeon *rw = (struct radeon*)screen->winsys;
	struct r600_resource_texture *rtex;
	struct r600_resource *resource;
	struct radeon_bo *bo = NULL;

	/* Support only 2D textures without mipmaps */
	if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) ||
	      templ->depth0 != 1 || templ->last_level != 0)
		return NULL;

	rtex = CALLOC_STRUCT(r600_resource_texture);
	if (rtex == NULL)
		return NULL;

	bo = radeon_bo(rw, whandle->handle, 0, 0, NULL);
	if (bo == NULL) {
		FREE(rtex);
		return NULL;
	}

	resource = &rtex->resource;
	resource->base.b = *templ;
	resource->base.vtbl = &r600_texture_vtbl;
	pipe_reference_init(&resource->base.b.reference, 1);
	resource->base.b.screen = screen;
	resource->bo = bo;
	rtex->depth = 0;
	rtex->pitch_override = whandle->stride;
	rtex->bpt = util_format_get_blocksize(templ->format);
	rtex->pitch[0] = whandle->stride;
	rtex->width[0] = templ->width0;
	rtex->height[0] = templ->height0;
	rtex->offset[0] = 0;
	rtex->size = align(rtex->pitch[0] * templ->height0, 64);

	return &resource->base.b;
}
Example #6
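/* Destroys a buffer object: removes its name from the handle table, unmaps
 * any CPU mapping, and closes the GEM handle. */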
static void radeon_bo_destroy(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct drm_gem_close args = {};

    if (bo->name) {
        pipe_mutex_lock(bo->mgr->bo_handles_mutex);
        util_hash_table_remove(bo->mgr->bo_handles,
			       (void*)(uintptr_t)bo->name);
        pipe_mutex_unlock(bo->mgr->bo_handles_mutex);
    }

    if (bo->ptr)
        munmap(bo->ptr, bo->size);

    /* Close object. */
    args.handle = bo->handle;
    drmIoctl(bo->rws->fd, DRM_IOCTL_GEM_CLOSE, &args);
    pipe_mutex_destroy(bo->map_mutex);
    FREE(bo);
}
Example #7
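/* Maps a buffer for CPU access: depending on the usage flags it first flushes
 * the command stream and/or waits for the GPU, then mmaps the BO through
 * DRM_RADEON_GEM_MMAP if it is not already mapped. */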
static void *radeon_bo_map_internal(struct pb_buffer *_buf,
                                    unsigned flags, void *flush_ctx)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct radeon_drm_cs *cs = flush_ctx;
    struct drm_radeon_gem_mmap args = {};
    void *ptr;

    /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
    if (!(flags & PB_USAGE_UNSYNCHRONIZED)) {
        /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
        if (flags & PB_USAGE_DONTBLOCK) {
            if (!(flags & PB_USAGE_CPU_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC);
                    return NULL;
                }

                if (radeon_bo_is_busy((struct pb_buffer*)bo,
                                      RADEON_USAGE_WRITE)) {
                    return NULL;
                }
            } else {
                if (radeon_bo_is_referenced_by_cs(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC);
                    return NULL;
                }

                if (radeon_bo_is_busy((struct pb_buffer*)bo,
                                      RADEON_USAGE_READWRITE)) {
                    return NULL;
                }
            }
        } else {
            if (!(flags & PB_USAGE_CPU_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, 0);
                }
                radeon_bo_wait((struct pb_buffer*)bo,
                               RADEON_USAGE_WRITE);
            } else {
                /* Mapping for write. */
                if (radeon_bo_is_referenced_by_cs(cs, bo)) {
                    cs->flush_cs(cs->flush_data, 0);
                } else {
                    /* Try to avoid busy-waiting in radeon_bo_wait. */
                    if (p_atomic_read(&bo->num_active_ioctls))
                        radeon_drm_cs_sync_flush(cs);
                }

                radeon_bo_wait((struct pb_buffer*)bo, RADEON_USAGE_READWRITE);
            }
        }
    }

    /* Return the pointer if it's already mapped. */
    if (bo->ptr)
        return bo->ptr;

    /* Map the buffer. */
    pipe_mutex_lock(bo->map_mutex);
    /* Return the pointer if it's already mapped (in case of a race). */
    if (bo->ptr) {
        pipe_mutex_unlock(bo->map_mutex);
        return bo->ptr;
    }
    args.handle = bo->handle;
    args.offset = 0;
    args.size = (uint64_t)bo->size;
    if (drmCommandWriteRead(bo->rws->fd,
                            DRM_RADEON_GEM_MMAP,
                            &args,
                            sizeof(args))) {
        pipe_mutex_unlock(bo->map_mutex);
        fprintf(stderr, "radeon: gem_mmap failed: %p 0x%08X\n",
                bo, bo->handle);
        return NULL;
    }

    ptr = mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
               bo->rws->fd, args.addr_ptr);
    if (ptr == MAP_FAILED) {
        pipe_mutex_unlock(bo->map_mutex);
        fprintf(stderr, "radeon: mmap failed, errno: %i\n", errno);
        return NULL;
    }
    bo->ptr = ptr;
    pipe_mutex_unlock(bo->map_mutex);

    return bo->ptr;
}