Example #1
static bool
fd_end_query(struct pipe_context *pctx, struct pipe_query *pq)
{
	struct fd_query *q = fd_query(pq);
	q->funcs->end_query(fd_context(pctx), q);
	return true;
}
Example #2
static void
fd_context_next_rb(struct pipe_context *pctx)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_ringbuffer *ring;

	fd_ringmarker_del(ctx->draw_start);
	fd_ringmarker_del(ctx->draw_end);

	ring = next_rb(ctx);

	ctx->draw_start = fd_ringmarker_new(ring);
	ctx->draw_end = fd_ringmarker_new(ring);

	fd_ringbuffer_set_parent(ring, NULL);
	ctx->ring = ring;

	fd_ringmarker_del(ctx->binning_start);
	fd_ringmarker_del(ctx->binning_end);

	ring = next_rb(ctx);

	ctx->binning_start = fd_ringmarker_new(ring);
	ctx->binning_end = fd_ringmarker_new(ring);

	fd_ringbuffer_set_parent(ring, ctx->ring);
	ctx->binning_ring = ring;
}
Example #3
/**
 * emit marker string as payload of a no-op packet, which can be
 * decoded by cffdump.
 */
static void
fd_emit_string_marker(struct pipe_context *pctx, const char *string, int len)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_ringbuffer *ring;
	const uint32_t *buf = (const void *)string;

	if (!ctx->batch)
		return;

	ring = ctx->batch->draw;

	/* max packet size is 0x3fff dwords: */
	len = MIN2(len, 0x3fff * 4);

	if (ctx->screen->gpu_id >= 500)
		OUT_PKT7(ring, CP_NOP, align(len, 4) / 4);
	else
		OUT_PKT3(ring, CP_NOP, align(len, 4) / 4);
	while (len >= 4) {
		OUT_RING(ring, *buf);
		buf++;
		len -= 4;
	}

	/* copy remainder bytes without reading past end of input string: */
	if (len > 0) {
		uint32_t w = 0;
		memcpy(&w, buf, len);
		OUT_RING(ring, w);
	}
}
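A minimal sketch of how such a callback is typically exposed and driven; the hook is the gallium emit_string_marker entry point, while the helper function below is hypothetical:

static void
example_emit_marker(struct pipe_context *pctx)
{
	const char *msg = "begin-shadow-pass";

	/* markers show up as CP_NOP payloads in a cffdump trace: */
	if (pctx->emit_string_marker)
		pctx->emit_string_marker(pctx, msg, strlen(msg));
}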
Example #4
void
fd_set_sampler_views(struct pipe_context *pctx, unsigned shader,
		unsigned start, unsigned nr,
		struct pipe_sampler_view **views)
{
	struct fd_context *ctx = fd_context(pctx);

	switch (shader) {
	case PIPE_SHADER_FRAGMENT:
		/* on a2xx, since there is a flat address space for textures/samplers,
		 * a change in # of fragment textures/samplers will trigger patching
		 * and re-emitting the vertex shader:
		 *
	 * (note: later gens ignore FD_DIRTY_TEXSTATE so it's fine to set it)
		 */
		if (nr != ctx->fragtex.num_textures)
			ctx->dirty |= FD_DIRTY_TEXSTATE;

		set_sampler_views(&ctx->fragtex, start, nr, views);
		ctx->dirty |= FD_DIRTY_FRAGTEX;
		break;
	case PIPE_SHADER_VERTEX:
		set_sampler_views(&ctx->verttex, start, nr, views);
		ctx->dirty |= FD_DIRTY_VERTTEX;
		break;
	default:
		break;
	}
}
Example #5
static void
fd_set_vertex_buffers(struct pipe_context *pctx,
		unsigned start_slot, unsigned count,
		const struct pipe_vertex_buffer *vb)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_vertexbuf_stateobj *so = &ctx->vertexbuf;
	int i;

	/* on a2xx, pitch is encoded in the vtx fetch instruction, so
	 * we need to mark VTXSTATE as dirty as well to trigger patching
	 * and re-emitting the vtx shader:
	 */
	for (i = 0; i < count; i++) {
		bool new_enabled = vb && (vb[i].buffer || vb[i].user_buffer);
		bool old_enabled = so->vb[i].buffer || so->vb[i].user_buffer;
		uint32_t new_stride = vb ? vb[i].stride : 0;
		uint32_t old_stride = so->vb[i].stride;
		if ((new_enabled != old_enabled) || (new_stride != old_stride)) {
			ctx->dirty |= FD_DIRTY_VTXSTATE;
			break;
		}
	}

	util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb, start_slot, count);
	so->count = util_last_bit(so->enabled_mask);

	ctx->dirty |= FD_DIRTY_VTXBUF;
}
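A minimal sketch of the caller's side of this hook, assuming vtx_resource already holds packed float4 vertex data (the helper is hypothetical):

static void
example_bind_vertex_buffer(struct pipe_context *pctx,
		struct pipe_resource *vtx_resource)
{
	struct pipe_vertex_buffer vbuf = {
		.stride = 4 * sizeof(float),
		.buffer_offset = 0,
		.buffer = vtx_resource,
	};

	/* bind one buffer at slot 0; any stride change will set
	 * FD_DIRTY_VTXSTATE above: */
	pctx->set_vertex_buffers(pctx, 0, 1, &vbuf);
}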
Example #6
/* notes from calim on #dri-devel:
 * index==0 will be non-UBO (ie. glUniformXYZ()) all packed together padded
 * out to vec4's
 * I should be able to consider that I own the user_ptr until the next
 * set_constant_buffer() call, at which point I don't really care about the
 * previous values.
 * index>0 will be UBO's.. well, I'll worry about that later
 */
static void
fd_set_constant_buffer(struct pipe_context *pctx, uint shader, uint index,
		struct pipe_constant_buffer *cb)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_constbuf_stateobj *so = &ctx->constbuf[shader];

	/* Note that the state tracker can unbind constant buffers by
	 * passing NULL here.
	 */
	if (unlikely(!cb)) {
		so->enabled_mask &= ~(1 << index);
		so->dirty_mask &= ~(1 << index);
		pipe_resource_reference(&so->cb[index].buffer, NULL);
		return;
	}

	pipe_resource_reference(&so->cb[index].buffer, cb->buffer);
	so->cb[index].buffer_offset = cb->buffer_offset;
	so->cb[index].buffer_size   = cb->buffer_size;
	so->cb[index].user_buffer   = cb->user_buffer;

	so->enabled_mask |= 1 << index;
	so->dirty_mask |= 1 << index;
	ctx->dirty |= FD_DIRTY_CONSTBUF;
}
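A minimal sketch of how a state tracker would exercise the bind/unbind paths above, using a user-pointer constant buffer (the helper and its values are hypothetical):

static void
example_bind_uniforms(struct pipe_context *pctx)
{
	static const float uniforms[4] = { 0.0f, 1.0f, 0.0f, 1.0f };
	struct pipe_constant_buffer cb = {
		.buffer_size = sizeof(uniforms),
		.user_buffer = uniforms,
	};

	pctx->set_constant_buffer(pctx, PIPE_SHADER_FRAGMENT, 0, &cb);

	/* ... draw ... */

	/* passing NULL unbinds the slot, taking the early-return path: */
	pctx->set_constant_buffer(pctx, PIPE_SHADER_FRAGMENT, 0, NULL);
}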
Example #7
static void
fd_zsa_state_bind(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->zsa = hwcso;
	ctx->dirty |= FD_DIRTY_ZSA;
}
Example #8
static void *
fd_resource_transfer_map(struct pipe_context *pctx,
		struct pipe_resource *prsc,
		unsigned level, unsigned usage,
		const struct pipe_box *box,
		struct pipe_transfer **pptrans)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_resource *rsc = fd_resource(prsc);
	struct pipe_transfer *ptrans = util_slab_alloc(&ctx->transfer_pool);
	enum pipe_format format = prsc->format;
	char *buf;

	if (!ptrans)
		return NULL;

	ptrans->resource = prsc;
	ptrans->level = level;
	ptrans->usage = usage;
	ptrans->box = *box;
	ptrans->stride = rsc->pitch * rsc->cpp;
	ptrans->layer_stride = ptrans->stride;

	buf = fd_bo_map(rsc->bo);

	*pptrans = ptrans;

	return buf +
		box->y / util_format_get_blockheight(format) * ptrans->stride +
		box->x / util_format_get_blockwidth(format) * rsc->cpp;
}
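A minimal sketch of the map/write/unmap cycle driven through these hooks (the helper, its dimensions, and the assumed 4 bytes/pixel are hypothetical):

static void
example_upload_pixels(struct pipe_context *pctx, struct pipe_resource *prsc,
		unsigned width, unsigned height)
{
	struct pipe_transfer *ptrans;
	struct pipe_box box;
	uint8_t *map;

	u_box_2d(0, 0, width, height, &box);

	map = pctx->transfer_map(pctx, prsc, 0, PIPE_TRANSFER_WRITE,
			&box, &ptrans);
	if (!map)
		return;

	/* write one row at a time, stepping by the returned stride: */
	for (unsigned y = 0; y < height; y++)
		memset(map + y * ptrans->stride, 0xff, width * 4 /* cpp */);

	pctx->transfer_unmap(pctx, ptrans);
}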
Example #9
static void
fd_blend_state_bind(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->blend = hwcso;
	ctx->dirty |= FD_DIRTY_BLEND;
}
Example #10
static void
fd_clear(struct pipe_context *pctx, unsigned buffers,
		const union pipe_color_union *color, double depth, unsigned stencil)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
	struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);
	unsigned cleared_buffers;

	/* for bookkeeping about which buffers have been cleared (and thus
	 * can fully or partially skip mem2gmem) we need to ignore buffers
	 * that have already had a draw, in case apps do silly things like
	 * clear after draw (ie. if you only clear the color buffer, but
	 * something like alpha-test causes side effects from the draw in
	 * the depth buffer, etc)
	 */
	cleared_buffers = buffers & (FD_BUFFER_ALL & ~ctx->restore);

	/* do we have full-screen scissor? */
	if (!memcmp(scissor, &ctx->disabled_scissor, sizeof(*scissor))) {
		ctx->cleared |= cleared_buffers;
	} else {
		ctx->partial_cleared |= cleared_buffers;
		if (cleared_buffers & PIPE_CLEAR_COLOR)
			ctx->cleared_scissor.color = *scissor;
		if (cleared_buffers & PIPE_CLEAR_DEPTH)
			ctx->cleared_scissor.depth = *scissor;
		if (cleared_buffers & PIPE_CLEAR_STENCIL)
			ctx->cleared_scissor.stencil = *scissor;
	}
	ctx->resolve |= buffers;
	ctx->needs_flush = true;

	if (buffers & PIPE_CLEAR_COLOR)
		fd_resource(pfb->cbufs[0]->texture)->dirty = true;

	if (buffers & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL)) {
		fd_resource(pfb->zsbuf->texture)->dirty = true;
		ctx->gmem_reason |= FD_GMEM_CLEARS_DEPTH_STENCIL;
	}

	DBG("%x depth=%f, stencil=%u (%s/%s)", buffers, depth, stencil,
		util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
		util_format_short_name(pipe_surface_format(pfb->zsbuf)));

	fd_hw_query_set_stage(ctx, ctx->ring, FD_STAGE_CLEAR);

	ctx->clear(ctx, buffers, color, depth, stencil);

	ctx->dirty |= FD_DIRTY_ZSA |
			FD_DIRTY_VIEWPORT |
			FD_DIRTY_RASTERIZER |
			FD_DIRTY_SAMPLE_MASK |
			FD_DIRTY_PROG |
			FD_DIRTY_CONSTBUF |
			FD_DIRTY_BLEND;

	if (fd_mesa_debug & FD_DBG_DCLEAR)
		ctx->dirty = 0xffffffff;
}
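For reference, a sketch of the gallium-level call that lands in this function (the helper and the clear values are hypothetical):

static void
example_clear(struct pipe_context *pctx)
{
	union pipe_color_union color = { .f = { 0.0f, 0.0f, 0.0f, 1.0f } };

	pctx->clear(pctx, PIPE_CLEAR_COLOR | PIPE_CLEAR_DEPTH,
			&color, 1.0, 0);
}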
Example #11
static void
fd_set_min_samples(struct pipe_context *pctx, unsigned min_samples)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->min_samples = min_samples;
	ctx->dirty |= FD_DIRTY_MIN_SAMPLES;
}
Example #12
static void
fd_bind_compute_state(struct pipe_context *pctx, void *state)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->compute = state;
	ctx->dirty_shader[PIPE_SHADER_COMPUTE] |= FD_DIRTY_SHADER_PROG;
}
Example #13
static void
fd_set_stream_output_targets(struct pipe_context *pctx,
		unsigned num_targets, struct pipe_stream_output_target **targets,
		const unsigned *offsets)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_streamout_stateobj *so = &ctx->streamout;
	unsigned i;

	debug_assert(num_targets <= ARRAY_SIZE(so->targets));

	for (i = 0; i < num_targets; i++) {
		boolean changed = targets[i] != so->targets[i];
		boolean append = (offsets[i] == (unsigned)-1);

		if (!changed && append)
			continue;

		if (!append)
			so->offsets[i] = offsets[i];

		pipe_so_target_reference(&so->targets[i], targets[i]);
	}

	for (; i < so->num_targets; i++) {
		pipe_so_target_reference(&so->targets[i], NULL);
	}

	so->num_targets = num_targets;

	ctx->dirty |= FD_DIRTY_STREAMOUT;
}
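A minimal sketch of binding a single transform-feedback buffer through this hook; an offset of -1 requests append mode as decoded above (the helper and the buffer argument are hypothetical):

static void
example_bind_xfb(struct pipe_context *pctx, struct pipe_resource *xfb_buf)
{
	struct pipe_stream_output_target *tgt =
		pctx->create_stream_output_target(pctx, xfb_buf,
				0 /* offset */, xfb_buf->width0);
	unsigned offset = (unsigned)-1;  /* append to previous contents */

	pctx->set_stream_output_targets(pctx, 1, &tgt, &offset);
}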
Example #14
static void
fd_vertex_state_bind(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->vtx.vtx = hwcso;
	ctx->dirty |= FD_DIRTY_VTXSTATE;
}
Example #15
static void
fd_rasterizer_state_bind(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->rasterizer = hwcso;
	ctx->dirty |= FD_DIRTY_RASTERIZER;
}
Example #16
static void
fd_clear(struct pipe_context *pctx, unsigned buffers,
		const union pipe_color_union *color, double depth, unsigned stencil)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_framebuffer_state *pfb = &ctx->framebuffer;

	ctx->cleared |= buffers;
	ctx->resolve |= buffers;
	ctx->needs_flush = true;

	if (buffers & PIPE_CLEAR_COLOR)
		fd_resource(pfb->cbufs[0]->texture)->dirty = true;

	if (buffers & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL)) {
		fd_resource(pfb->zsbuf->texture)->dirty = true;
		ctx->gmem_reason |= FD_GMEM_CLEARS_DEPTH_STENCIL;
	}

	DBG("%x depth=%f, stencil=%u (%s/%s)", buffers, depth, stencil,
		util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
		util_format_short_name(pipe_surface_format(pfb->zsbuf)));

	ctx->clear(ctx, buffers, color, depth, stencil);

	ctx->dirty |= FD_DIRTY_ZSA |
			FD_DIRTY_RASTERIZER |
			FD_DIRTY_SAMPLE_MASK |
			FD_DIRTY_PROG |
			FD_DIRTY_CONSTBUF |
			FD_DIRTY_BLEND;

	if (fd_mesa_debug & FD_DBG_DCLEAR)
		ctx->dirty = 0xffffffff;
}
Example #17
/**
 * Optimal hardware path for blitting pixels.
 * Scaling, format conversion, up- and downsampling (resolve) are allowed.
 */
static void
fd_blit(struct pipe_context *pctx, const struct pipe_blit_info *blit_info)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_blit_info info = *blit_info;

	if (info.src.resource->nr_samples > 1 &&
			info.dst.resource->nr_samples <= 1 &&
			!util_format_is_depth_or_stencil(info.src.resource->format) &&
			!util_format_is_pure_integer(info.src.resource->format)) {
		DBG("color resolve unimplemented");
		return;
	}

	if (util_try_blit_via_copy_region(pctx, &info)) {
		return; /* done */
	}

	if (info.mask & PIPE_MASK_S) {
		DBG("cannot blit stencil, skipping");
		info.mask &= ~PIPE_MASK_S;
	}

	if (!util_blitter_is_blit_supported(ctx->blitter, &info)) {
		DBG("blit unsupported %s -> %s",
				util_format_short_name(info.src.resource->format),
				util_format_short_name(info.dst.resource->format));
		return;
	}

	fd_blitter_pipe_begin(ctx);
	util_blitter_blit(ctx->blitter, &info);
	fd_blitter_pipe_end(ctx);
}
Example #18
static void
fd_resource_transfer_unmap(struct pipe_context *pctx,
		struct pipe_transfer *ptrans)
{
	struct fd_context *ctx = fd_context(pctx);
	util_slab_free(&ctx->transfer_pool, ptrans);
}
Example #19
static void
fd_resource_transfer_unmap(struct pipe_context *pctx,
		struct pipe_transfer *ptrans)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_resource *rsc = fd_resource(ptrans->resource);
	struct fd_transfer *trans = fd_transfer(ptrans);

	if (trans->staging && !(ptrans->usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
		struct pipe_box box;
		u_box_2d(0, 0, ptrans->box.width, ptrans->box.height, &box);
		fd_resource_flush(trans, &box);
	}

	if (!(ptrans->usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
		fd_bo_cpu_fini(rsc->bo);
		if (rsc->stencil)
			fd_bo_cpu_fini(rsc->stencil->bo);
	}

	util_range_add(&rsc->valid_buffer_range,
				   ptrans->box.x,
				   ptrans->box.x + ptrans->box.width);

	pipe_resource_reference(&ptrans->resource, NULL);
	util_slab_free(&ctx->transfer_pool, ptrans);

	free(trans->staging);
}
Example #20
static void
fd_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->sample_mask = (uint16_t)sample_mask;
	ctx->dirty |= FD_DIRTY_SAMPLE_MASK;
}
Example #21
/**
 * Copy a block of pixels from one resource to another.
 * The resources must be of the same format.
 * Resources with nr_samples > 1 are not allowed.
 */
static void
fd_resource_copy_region(struct pipe_context *pctx,
		struct pipe_resource *dst,
		unsigned dst_level,
		unsigned dstx, unsigned dsty, unsigned dstz,
		struct pipe_resource *src,
		unsigned src_level,
		const struct pipe_box *src_box)
{
	struct fd_context *ctx = fd_context(pctx);

	/* TODO if we have 2d core, or other DMA engine that could be used
	 * for simple copies and reasonably easily synchronized with the 3d
	 * core, this is where we'd plug it in..
	 */

	/* try blit on 3d pipe: */
	if (fd_blitter_pipe_copy_region(ctx,
			dst, dst_level, dstx, dsty, dstz,
			src, src_level, src_box))
		return;

	/* else fallback to pure sw: */
	util_resource_copy_region(pctx,
			dst, dst_level, dstx, dsty, dstz,
			src, src_level, src_box);
}
Example #22
static void
fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fencep,
		unsigned flags)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_fence_handle *fence = NULL;

	DBG("%p: flush: flags=%x\n", ctx->batch, flags);

	/* Take a ref to the batch's fence (batch can be unref'd when flushed): */
	fd_fence_ref(pctx->screen, &fence, ctx->batch->fence);

	if (flags & PIPE_FLUSH_FENCE_FD)
		ctx->batch->needs_out_fence_fd = true;

	if (!ctx->screen->reorder) {
		fd_batch_flush(ctx->batch, true, false);
	} else if (flags & PIPE_FLUSH_DEFERRED) {
		fd_bc_flush_deferred(&ctx->screen->batch_cache, ctx);
	} else {
		fd_bc_flush(&ctx->screen->batch_cache, ctx);
	}

	if (fencep)
		fd_fence_ref(pctx->screen, fencep, fence);

	fd_fence_ref(pctx->screen, &fence, NULL);
}
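A minimal sketch of a caller flushing and then waiting on the returned fence (the helper is hypothetical; the screen hooks are the standard gallium ones):

static void
example_flush_and_wait(struct pipe_context *pctx)
{
	struct pipe_screen *pscreen = pctx->screen;
	struct pipe_fence_handle *fence = NULL;

	pctx->flush(pctx, &fence, 0);

	if (fence) {
		pscreen->fence_finish(pscreen, pctx, fence,
				PIPE_TIMEOUT_INFINITE);
		pscreen->fence_reference(pscreen, &fence, NULL);
	}
}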
Example #23
void
fd_prog_init(struct pipe_context *pctx)
{
	struct fd_context *ctx = fd_context(pctx);
	int i;

	pctx->bind_fs_state = fd_fp_state_bind;
	pctx->bind_vs_state = fd_vp_state_bind;

	// XXX for now, let a2xx keep its own hand-rolled shaders
	// for solid and blit progs:
	if (ctx->screen->gpu_id < 300)
		return;

	ctx->solid_prog.fp = assemble_tgsi(pctx, solid_fp, true);
	ctx->solid_prog.vp = assemble_tgsi(pctx, solid_vp, false);
	ctx->blit_prog[0].vp = assemble_tgsi(pctx, blit_vp, false);
	ctx->blit_prog[0].fp = fd_prog_blit(pctx, 1, false);
	for (i = 1; i < ctx->screen->max_rts; i++) {
		ctx->blit_prog[i].vp = ctx->blit_prog[0].vp;
		ctx->blit_prog[i].fp = fd_prog_blit(pctx, i + 1, false);
	}

	ctx->blit_z.vp = ctx->blit_prog[0].vp;
	ctx->blit_z.fp = fd_prog_blit(pctx, 0, true);
	ctx->blit_zs.vp = ctx->blit_prog[0].vp;
	ctx->blit_zs.fp = fd_prog_blit(pctx, 1, true);
}
Example #24
static void
fd5_set_sampler_views(struct pipe_context *pctx, enum pipe_shader_type shader,
		unsigned start, unsigned nr,
		struct pipe_sampler_view **views)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd5_context *fd5_ctx = fd5_context(ctx);
	uint16_t astc_srgb = 0;
	unsigned i;

	for (i = 0; i < nr; i++) {
		if (views[i]) {
			struct fd5_pipe_sampler_view *view =
					fd5_pipe_sampler_view(views[i]);
			if (view->astc_srgb)
				astc_srgb |= (1 << i);
		}
	}

	fd_set_sampler_views(pctx, shader, start, nr, views);

	if (shader == PIPE_SHADER_FRAGMENT) {
		fd5_ctx->fastc_srgb = astc_srgb;
	} else if (shader == PIPE_SHADER_VERTEX) {
		fd5_ctx->vastc_srgb = astc_srgb;
	}
}
Example #25
/* emit accumulated render cmds, needed for example if render target has
 * changed, or for flush()
 */
void
fd_context_render(struct pipe_context *pctx)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_framebuffer_state *pfb = &ctx->framebuffer;

	DBG("needs_flush: %d", ctx->needs_flush);

	if (!ctx->needs_flush)
		return;

	fd_gmem_render_tiles(pctx);

	DBG("%p/%p/%p", ctx->ring->start, ctx->ring->cur, ctx->ring->end);

	/* if size in dwords is more than half the buffer size, then wait
	 * and wrap around.  note that (ring->cur - ring->start) counts
	 * dwords while ring->size is in bytes, so size/8 dwords is half
	 * the buffer:
	 */
	if ((ctx->ring->cur - ctx->ring->start) > ctx->ring->size/8)
		fd_context_next_rb(pctx);

	ctx->needs_flush = false;
	ctx->cleared = ctx->restore = ctx->resolve = 0;
	ctx->gmem_reason = 0;
	ctx->num_draws = 0;

	if (pfb->cbufs[0])
		fd_resource(pfb->cbufs[0]->texture)->dirty = false;
	if (pfb->zsbuf)
		fd_resource(pfb->zsbuf->texture)->dirty = false;
}
Example #26
static void
fd_clear(struct pipe_context *pctx, unsigned buffers,
		const union pipe_color_union *color, double depth, unsigned stencil)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_ringbuffer *ring = ctx->ring;
	struct pipe_framebuffer_state *fb = &ctx->framebuffer.base;
	uint32_t reg, colr = 0;

	ctx->cleared |= buffers;
	ctx->resolve |= buffers;
	ctx->needs_flush = true;

	if (buffers & PIPE_CLEAR_COLOR)
		fd_resource(fb->cbufs[0]->texture)->dirty = true;

	if (buffers & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL))
		fd_resource(fb->zsbuf->texture)->dirty = true;

	DBG("depth=%f, stencil=%u", depth, stencil);

	if ((buffers & PIPE_CLEAR_COLOR) && fb->nr_cbufs)
		colr  = pack_rgba(fb->cbufs[0]->format, color->f);

	/* emit generic state now: */
	fd_state_emit(pctx, ctx->dirty &
			(FD_DIRTY_BLEND | FD_DIRTY_VIEWPORT |
					FD_DIRTY_FRAMEBUFFER | FD_DIRTY_SCISSOR));

	fd_emit_vertex_bufs(ring, 0x9c, (struct fd_vertex_buf[]) {
			{ .prsc = ctx->solid_vertexbuf, .size = 48 },
		}, 1);

	/* ... (remaining clear-state emission elided) ... */
}
Example #27
static void
assemble_variant(struct ir3_shader_variant *v)
{
	struct fd_context *ctx = fd_context(v->shader->pctx);
	uint32_t sz, *bin;

	bin = ir3_assemble(v->ir, &v->info, ctx->screen->gpu_id);
	sz = v->info.sizedwords * 4;

	v->bo = fd_bo_new(ctx->dev, sz,
			DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
			DRM_FREEDRENO_GEM_TYPE_KMEM);

	memcpy(fd_bo_map(v->bo), bin, sz);

	free(bin);

	if (ctx->screen->gpu_id >= 400) {
		v->instrlen = v->info.sizedwords / (2 * 16);
	} else {
		v->instrlen = v->info.sizedwords / (2 * 4);
	}

	/* NOTE: if relative addressing is used, we set constlen in
	 * the compiler (to worst-case value) since we don't know in
	 * the assembler what the max addr reg value can be:
	 */
	v->constlen = MAX2(v->constlen, v->info.max_const + 1);

	/* no need to keep the ir around beyond this point: */
	ir3_destroy(v->ir);
	v->ir = NULL;
}
Example #28
void
fd6_draw_init(struct pipe_context *pctx)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->draw_vbo = fd6_draw_vbo;
	ctx->clear = fd6_clear;
}
Example #29
static boolean
fd_get_query_result(struct pipe_context *pctx, struct pipe_query *pq,
		boolean wait, union pipe_query_result *result)
{
	struct fd_query *q = fd_query(pq);
	return q->funcs->get_query_result(fd_context(pctx), q, wait, result);
}
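A minimal sketch of the full query lifecycle that these thin wrappers implement (the helper is hypothetical):

static void
example_occlusion_query(struct pipe_context *pctx)
{
	struct pipe_query *q =
		pctx->create_query(pctx, PIPE_QUERY_OCCLUSION_COUNTER, 0);
	union pipe_query_result result;

	pctx->begin_query(pctx, q);
	/* ... draws ... */
	pctx->end_query(pctx, q);

	/* wait=TRUE blocks until the GPU result is available: */
	if (pctx->get_query_result(pctx, q, TRUE, &result))
		DBG("samples passed: %u", (unsigned)result.u64);

	pctx->destroy_query(pctx, q);
}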
Example #30
static void
fd_vp_state_bind(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->prog.vp = hwcso;
	ctx->prog.dirty |= FD_SHADER_DIRTY_VP;
	ctx->dirty |= FD_DIRTY_PROG;
}