Example #1
static void
fd_resource_destroy(struct pipe_screen *pscreen,
		struct pipe_resource *prsc)
{
	struct fd_resource *rsc = fd_resource(prsc);
	if (rsc->bo)
		fd_bo_del(rsc->bo);
	list_delinit(&rsc->list);
	util_range_destroy(&rsc->valid_buffer_range);
	FREE(rsc);
}
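Every destroy path here calls list_delinit() unconditionally, whether or not the resource is still on a pending list. That works because list_delinit(), unlike a plain unlink, leaves the node self-linked, so unlinking an already-detached node is a harmless no-op. A minimal sketch of the helpers involved, modeled on Mesa's util/list.h (exact contents may vary by version):

struct list_head {
	struct list_head *prev;
	struct list_head *next;
};

/* one-time setup: a node must be self-linked before its first unlink */
static inline void list_inithead(struct list_head *item)
{
	item->prev = item;
	item->next = item;
}

static inline void list_delinit(struct list_head *item)
{
	/* unlink from whatever list the node is currently on... */
	item->prev->next = item->next;
	item->next->prev = item->prev;
	/* ...then re-initialize, so a later unlink is a no-op */
	item->next = item;
	item->prev = item;
}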
Example #2
/* lookup a buffer, call w/ table_lock held: */
static struct fd_bo * lookup_bo(void *tbl, uint32_t key)
{
	struct fd_bo *bo = NULL;
	if (!drmHashLookup(tbl, key, (void **)&bo)) {
		/* found, incr refcnt and return: */
		bo = fd_bo_ref(bo);

		/* don't break the bucket if this bo was found in one */
		list_delinit(&bo->list);
	}
	return bo;
}
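The comment above states a locking contract rather than enforcing one: the caller must hold table_lock across the lookup so the fd_bo_ref() refcount bump cannot race with a concurrent final unref of the same bo. A hypothetical caller illustrating that contract (bo_from_handle_locked, dev->handle_table, and table_lock as a pthread mutex are illustrative assumptions, not libdrm API):

#include <pthread.h>

extern pthread_mutex_t table_lock;	/* assumed global, as the comment implies */

static struct fd_bo *
bo_from_handle_locked(struct fd_device *dev, uint32_t handle)
{
	struct fd_bo *bo;

	pthread_mutex_lock(&table_lock);
	bo = lookup_bo(dev->handle_table, handle);	/* contract satisfied */
	pthread_mutex_unlock(&table_lock);

	return bo;	/* caller owns one reference, or NULL if not found */
}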
Example #3
void
ilo_end_draw_query(struct ilo_context *ilo, struct ilo_query *q)
{
   ilo_draw_set_owner(ilo);

   /* reclaim the reserved space */
   ilo->draw.cp_owner.reserve -= q->cmd_len >> q->in_pairs;
   assert(ilo->draw.cp_owner.reserve >= 0);

   query_end_bo(ilo, q);

   list_delinit(&q->list);
}
Example #4
void
ir3_insert_by_depth(struct ir3_instruction *instr, struct list_head *list)
{
	/* remove from existing spot in list: */
	list_delinit(&instr->node);

	/* find where to re-insert instruction: */
	list_for_each_entry (struct ir3_instruction, pos, list, node) {
		if (pos->depth > instr->depth) {
			list_add(&instr->node, &pos->node);
			return;
		}
	}
	/* if we get here, we didn't find an insertion spot: */
	list_addtail(&instr->node, list);
}
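The unlink-and-reinsert pattern above is another place the "delinit" behavior pays off: the function repositions an instruction without caring whether it is currently on the list at all. For reference, sketches of the two insertion helpers it pairs with, again modeled on Mesa's util/list.h: list_add() links the item immediately after the given node, and list_addtail() immediately before it, i.e. at the tail when passed the list head:

static inline void list_add(struct list_head *item, struct list_head *list)
{
	item->prev = list;
	item->next = list->next;
	list->next->prev = item;
	list->next = item;
}

static inline void list_addtail(struct list_head *item, struct list_head *list)
{
	item->next = list;
	item->prev = list->prev;
	list->prev->next = item;
	list->prev = item;
}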
Example #5
void
etna_resource_used(struct etna_context *ctx, struct pipe_resource *prsc,
                   enum etna_resource_status status)
{
   struct etna_resource *rsc;

   if (!prsc)
      return;

   rsc = etna_resource(prsc);
   rsc->status |= status;

   /* TODO resources can actually be shared across contexts,
    * so I'm not sure a single list-head will do the trick? */
   debug_assert((rsc->pending_ctx == ctx) || !rsc->pending_ctx);
   list_delinit(&rsc->list);
   list_addtail(&rsc->list, &ctx->used_resources);
   rsc->pending_ctx = ctx;
}
Example #6
static void
etna_resource_destroy(struct pipe_screen *pscreen, struct pipe_resource *prsc)
{
   struct etna_resource *rsc = etna_resource(prsc);

   if (rsc->bo)
      etna_bo_del(rsc->bo);

   if (rsc->ts_bo)
      etna_bo_del(rsc->ts_bo);

   if (rsc->scanout)
      renderonly_scanout_destroy(rsc->scanout);

   list_delinit(&rsc->list);

   pipe_resource_reference(&rsc->texture, NULL);

   FREE(rsc);
}
Example #7
static void
realloc_bo(struct fd_resource *rsc, uint32_t size)
{
	struct fd_screen *screen = fd_screen(rsc->base.b.screen);
	uint32_t flags = DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
			DRM_FREEDRENO_GEM_TYPE_KMEM; /* TODO */

	/* if we start using things other than write-combine,
	 * be sure to check for PIPE_RESOURCE_FLAG_MAP_COHERENT
	 */

	if (rsc->bo)
		fd_bo_del(rsc->bo);

	rsc->bo = fd_bo_new(screen->dev, size, flags);
	rsc->timestamp = 0;
	rsc->dirty = rsc->reading = false;
	list_delinit(&rsc->list);
	util_range_set_empty(&rsc->valid_buffer_range);
}
Example #8
static void
schedule(struct ir3_sched_ctx *ctx, struct ir3_instruction *instr)
{
	debug_assert(ctx->block == instr->block);

	/* maybe there is a better way to handle this than just stuffing
	 * a nop.. ideally we'd know about this constraint in the
	 * scheduling and depth calculation..
	 */
	if (ctx->scheduled && is_sfu_or_mem(ctx->scheduled) && is_sfu_or_mem(instr))
		ir3_NOP(ctx->block);

	/* remove from depth list:
	 */
	list_delinit(&instr->node);

	if (writes_addr(instr)) {
		debug_assert(ctx->addr == NULL);
		ctx->addr = instr;
	}

	if (writes_pred(instr)) {
		debug_assert(ctx->pred == NULL);
		ctx->pred = instr;
	}

	instr->flags |= IR3_INSTR_MARK;

	list_addtail(&instr->node, &instr->block->instr_list);
	ctx->scheduled = instr;

	if (writes_addr(instr) || writes_pred(instr) || is_input(instr)) {
		clear_cache(ctx, NULL);
	} else {
		/* invalidate only the necessary entries.. */
		clear_cache(ctx, instr);
	}
}
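One caveat that applies to all of these call sites: list_delinit() rewrites the very pointers an in-progress list walk would follow, so any loop that unlinks entries from the list it is traversing needs Mesa's _safe iterator variant, which caches the next entry before the loop body runs. A sketch under that assumption (drain and process are hypothetical names):

extern void process(struct ir3_instruction *instr);

static void
drain(struct list_head *work_list)
{
	list_for_each_entry_safe (struct ir3_instruction, instr, work_list, node) {
		list_delinit(&instr->node);	/* safe: next entry already cached */
		process(instr);
	}
}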