Example #1
static void
nv30_fragprog_upload(struct nv30_context *nv30)
{
   struct nouveau_context *nv = &nv30->base;
   struct nv30_fragprog *fp = nv30->fragprog.program;
   struct pipe_context *pipe = &nv30->base.pipe;

   if (unlikely(!fp->buffer))
      fp->buffer = pipe_buffer_create(pipe->screen, 0, 0, fp->insn_len * 4);

#ifndef PIPE_ARCH_BIG_ENDIAN
   pipe_buffer_write(pipe, fp->buffer, 0, fp->insn_len * 4, fp->insn);
#else
   {
      struct pipe_transfer *transfer;
      uint32_t *map;
      int i;

      map = pipe_buffer_map(pipe, fp->buffer,
                            PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE,
                            &transfer);
      for (i = 0; i < fp->insn_len; i++)
         *map++ = (fp->insn[i] >> 16) | (fp->insn[i] << 16);
      pipe_buffer_unmap(pipe, transfer);
   }
#endif

   if (nv04_resource(fp->buffer)->domain != NOUVEAU_BO_VRAM)
      nouveau_buffer_migrate(nv, nv04_resource(fp->buffer), NOUVEAU_BO_VRAM);
}
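A note on the big-endian path above: each 32-bit instruction word has its two 16-bit halves exchanged before it is written to the buffer. A minimal standalone sketch of that swap (the helper name is hypothetical, not part of the nv30 driver):

static inline uint32_t
swap_halfwords(uint32_t v)
{
   /* 0xAAAABBBB -> 0xBBBBAAAA, matching (insn >> 16) | (insn << 16) above */
   return (v >> 16) | (v << 16);
}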
Example #2
void si_pm4_upload_indirect_buffer(struct si_context *sctx,
				   struct si_pm4_state *state)
{
	struct pipe_screen *screen = sctx->b.b.screen;
	unsigned aligned_ndw = align(state->ndw, 8);

	/* only supported on CIK and later */
	if (sctx->b.chip_class < CIK)
		return;

	assert(state->ndw);
	assert(aligned_ndw <= SI_PM4_MAX_DW);

	r600_resource_reference(&state->indirect_buffer, NULL);
	/* TODO: this hangs with 1024 or higher alignment on GFX9. */
	state->indirect_buffer = (struct r600_resource*)
		si_aligned_buffer_create(screen, 0,
					 PIPE_USAGE_DEFAULT, aligned_ndw * 4,
					 256);
	if (!state->indirect_buffer)
		return;

	/* Pad the IB to 8 DWs to meet CP fetch alignment requirements. */
	if (sctx->screen->info.gfx_ib_pad_with_type2) {
		for (int i = state->ndw; i < aligned_ndw; i++)
			state->pm4[i] = 0x80000000; /* type2 nop packet */
	} else {
		for (int i = state->ndw; i < aligned_ndw; i++)
			state->pm4[i] = 0xffff1000; /* type3 nop packet */
	}

	pipe_buffer_write(&sctx->b.b, &state->indirect_buffer->b.b,
			  0, aligned_ndw * 4, state->pm4);
}
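The padding above rounds the IB size up to a multiple of 8 DWs and fills the tail with NOP packets so the CP fetch alignment is met. A small sketch of the rounding arithmetic, assuming the usual power-of-two align() helper used in Mesa:

static inline unsigned
align_up(unsigned value, unsigned alignment)
{
	/* alignment must be a power of two */
	return (value + alignment - 1) & ~(alignment - 1);
}

/* e.g. align_up(13, 8) == 16, so three trailing NOP DWs follow the 13 real ones */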
Example #3
/* --------------- */
static inline int SetConstantColor(GIDevice *icontext, float r, float g, float b, float a)
{
	if ((icontext->constant == NULL))
	{
		return GI_ERROR;
	}

	Color4F color;
	color.r = r;
	color.g = g;
	color.b = b;
	color.a = a;

	/* Upload the constant color into the device's constant buffer. */
	pipe_buffer_write(icontext->context, icontext->constant, 64, sizeof(color), &color);

	return GI_SUCCESS;
}
Example #4
/**
 * Draw screen-aligned textured quad.
 * Note: this isn't especially efficient.
 */
void 
util_draw_texquad(struct pipe_context *pipe, struct cso_context *cso,
                  uint vbuf_slot,
                  float x0, float y0, float x1, float y1, float z)
{
   uint numAttribs = 2, i, j;
   uint vertexBytes = 4 * (4 * numAttribs * sizeof(float));
   struct pipe_resource *vbuf = NULL;  
   float *v = NULL;

   v = MALLOC(vertexBytes);
   if (!v)
      goto out;

   /*
    * Load vertex buffer
    */
   for (i = j = 0; i < 4; i++) {
      v[j + 2] = z;   /* z */
      v[j + 3] = 1.0; /* w */
      v[j + 6] = 0.0; /* r */
      v[j + 7] = 1.0; /* q */
      j += 8;
   }

   v[0] = x0;
   v[1] = y0;
   v[4] = 0.0; /*s*/
   v[5] = 0.0; /*t*/

   v[8] = x1;
   v[9] = y0;
   v[12] = 1.0;
   v[13] = 0.0;

   v[16] = x1;
   v[17] = y1;
   v[20] = 1.0;
   v[21] = 1.0;

   v[24] = x0;
   v[25] = y1;
   v[28] = 0.0;
   v[29] = 1.0;
	 
   vbuf = pipe_buffer_create(pipe->screen, PIPE_BIND_VERTEX_BUFFER,
                             PIPE_USAGE_STAGING, vertexBytes);
   if (!vbuf)
      goto out;
   pipe_buffer_write(pipe, vbuf, 0, vertexBytes, v);

   util_draw_vertex_buffer(pipe, cso, vbuf, vbuf_slot, 0,
                           PIPE_PRIM_TRIANGLE_FAN, 4, 2);

out:
   if (vbuf)
      pipe_resource_reference(&vbuf, NULL);
   
   FREE(v);
}
Example #5
/* Set the surface to which the Device context will now render. */
static int galSetRenderTarget(IDevice *pDevice, ISurface *pSurface)
{
	GIDevice *icontext = NULL;
	if ((icontext = (GIDevice*)pDevice) == NULL)
	{
		return GI_ERROR;
	}

	GISurface *isurface = NULL;
	if ((isurface = (GISurface*)pSurface) == NULL)
	{
		return GI_ERROR;
	}

	struct pipe_context *context;
	if ((context = icontext->context) == NULL)
	{
		return GI_ERROR;
	}

	struct pipe_resource *resource;
	if ((resource = isurface->resource) == NULL)
	{
		return GI_ERROR;
	}

	struct pipe_surface *surface;
	if ((surface = isurface->surface) == NULL)
	{
		return GI_ERROR;
	}

	struct pipe_viewport_state viewport;
	memset(&viewport, 0, sizeof(viewport));
	viewport.scale[0] = resource->width0;
	viewport.scale[1] = resource->height0;
	viewport.scale[2] = 1.0f;
	viewport.scale[3] = 1.0f;
	context->set_viewport_state(context, &viewport);

	struct pipe_framebuffer_state state;
	memset(&state, 0, sizeof(state));
	state.nr_cbufs = 1;
	state.width = resource->width0;
	state.height = resource->height0;
	state.cbufs[0] = surface;
	context->set_framebuffer_state(context, &state);

	float transform[4];
	transform[0] = resource->width0;
	transform[1] = resource->height0;
	transform[2] = 1;
	transform[3] = 1;
	pipe_buffer_write(context, icontext->constant, 0, sizeof(transform), transform);

	return GI_SUCCESS;
}
Example #6
/* Draws a line connecting two Point2F structures. */
static int galDrawLine(IDevice *pContext, Point2F a, Point2F b)
{
	GIDevice *icontext = NULL;
	if ((icontext = (GIDevice*)pContext) == NULL)
	{
		return GI_ERROR;
	}

	struct pipe_context *context;
	if ((context = icontext->context) == NULL)
	{
		return GI_ERROR;
	}

	GIEdge edge[2];

	edge[0].x = a.x;
	edge[0].y = a.y;
	edge[0].r = 1.0f;
	edge[0].g = 1.0f;
	edge[0].b = 1.0f;
	edge[0].a = 1.0f;

	edge[1].x = b.x;
	edge[1].y = b.y;
	edge[1].r = 1.0f;
	edge[1].g = 1.0f;
	edge[1].b = 1.0f;
	edge[1].a = 0.4f;



	pipe_buffer_write(context, icontext->buffer, 0, sizeof(edge), edge);


	struct pipe_draw_info info;
	memset(&info, 0, sizeof(info));
	info.instance_count = 1;
	info.mode = PIPE_PRIM_LINES;
	info.start = 0;
	info.count = 2;
	info.min_index = 0;
	info.max_index = info.count - 1;

	SetPixelShader(context, icontext->fs_color);

	context->draw_vbo(context, &info);

	SetPixelShader(context, NULL);

	return GI_SUCCESS;
}
Example #7
static struct pipe_resource *
create_solid_vertexbuf(struct pipe_context *pctx)
{
	static const float init_shader_const[] = {
			-1.000000, +1.000000, +1.000000,
			+1.000000, -1.000000, +1.000000,
	};
	struct pipe_resource *prsc = pipe_buffer_create(pctx->screen,
			PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE, sizeof(init_shader_const));
	pipe_buffer_write(pctx, prsc, 0,
			sizeof(init_shader_const), init_shader_const);
	return prsc;
}
Example #8
File: xa_renderer.c  Project: UIKit0/mesa
void
renderer_set_constants(struct xa_context *r,
		       int shader_type, const float *params, int param_bytes)
{
    struct pipe_resource **cbuf =
	(shader_type == PIPE_SHADER_VERTEX) ? &r->vs_const_buffer :
	&r->fs_const_buffer;

    pipe_resource_reference(cbuf, NULL);
    *cbuf = pipe_buffer_create(r->pipe->screen,
			       PIPE_BIND_CONSTANT_BUFFER, PIPE_USAGE_STATIC,
			       param_bytes);

    if (*cbuf) {
	pipe_buffer_write(r->pipe, *cbuf, 0, param_bytes, params);
    }
    pipe_set_constant_buffer(r->pipe, shader_type, 0, *cbuf);
}
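A hedged usage sketch for the helper above: uploading a single float4 fragment constant from XA code might look roughly like the following (the variable names and the color value are illustrative, not taken from xa_renderer.c):

    /* illustrative only: 'r' is an already-initialized struct xa_context */
    static const float solid_red[4] = { 1.0f, 0.0f, 0.0f, 1.0f };
    renderer_set_constants(r, PIPE_SHADER_FRAGMENT, solid_red, sizeof(solid_red));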
Example #9
/**
 * Pass the given program parameters to the graphics pipe as a
 * constant buffer.
 * \param shader_type  either PIPE_SHADER_VERTEX or PIPE_SHADER_FRAGMENT
 */
void st_upload_constants( struct st_context *st,
                          struct gl_program_parameter_list *params,
                          unsigned shader_type)
{
   struct pipe_context *pipe = st->pipe;
   struct pipe_resource **cbuf = &st->state.constants[shader_type];

   assert(shader_type == PIPE_SHADER_VERTEX ||
          shader_type == PIPE_SHADER_FRAGMENT ||
          shader_type == PIPE_SHADER_GEOMETRY);

   /* update constants */
   if (params && params->NumParameters) {
      const uint paramBytes = params->NumParameters * sizeof(GLfloat) * 4;

      _mesa_load_state_parameters(st->ctx, params);

      /* We always need to get a new buffer, to keep the drivers simple and
       * avoid gratuitous rendering synchronization.
       */
      pipe_resource_reference(cbuf, NULL );
      *cbuf = pipe_buffer_create(pipe->screen,
				 PIPE_BIND_CONSTANT_BUFFER,
				 PIPE_USAGE_STATIC,
				 paramBytes);

      if (ST_DEBUG & DEBUG_CONSTANTS) {
	 debug_printf("%s(shader=%d, numParams=%d, stateFlags=0x%x)\n", 
                      __FUNCTION__, shader_type, params->NumParameters,
                      params->StateFlags);
         _mesa_print_parameter_list(params);
      }

      /* load Mesa constants into the constant buffer */
      pipe_buffer_write(st->pipe, *cbuf,
				    0, paramBytes,
				    params->ParameterValues);

      st->pipe->set_constant_buffer(st->pipe, shader_type, 0, *cbuf);
   }
   else {
      st->constants.tracked_state[shader_type].dirty.mesa = 0x0;
   }
}
Example #10
/**
 * Set custom renderer fragment shader, and optionally set samplers and views
 * and upload the fragment constant buffer.
 *
 * This function modifies fragment_shader, samplers and fragment_sampler_views
 * states.
 */
static void renderer_set_custom_fs(struct renderer *renderer,
                                   void *fs,
                                   const struct pipe_sampler_state **samplers,
                                   struct pipe_sampler_view **views,
                                   VGint num_samplers,
                                   const void *const_buffer,
                                   VGint const_buffer_len)
{
   cso_set_fragment_shader_handle(renderer->cso, fs);

   /* set samplers and views */
   if (num_samplers) {
      cso_set_samplers(renderer->cso, PIPE_SHADER_FRAGMENT, num_samplers, samplers);
      cso_set_sampler_views(renderer->cso, PIPE_SHADER_FRAGMENT, num_samplers, views);
   }

   /* upload fs constant buffer */
   if (const_buffer_len) {
      struct pipe_resource *cbuf = renderer->fs_cbuf;

      if (!cbuf || renderer->fs_cbuf_len != const_buffer_len ||
          memcmp(renderer->fs_cbuf_data, const_buffer, const_buffer_len)) {
         pipe_resource_reference(&cbuf, NULL);

         cbuf = pipe_buffer_create(renderer->pipe->screen,
               PIPE_BIND_CONSTANT_BUFFER, PIPE_USAGE_STATIC,
               const_buffer_len);
         pipe_buffer_write(renderer->pipe, cbuf, 0,
               const_buffer_len, const_buffer);
         pipe_set_constant_buffer(renderer->pipe,
               PIPE_SHADER_FRAGMENT, 0, cbuf);

         renderer->fs_cbuf = cbuf;
         if (const_buffer_len <= sizeof(renderer->fs_cbuf_data)) {
            memcpy(renderer->fs_cbuf_data, const_buffer, const_buffer_len);
            renderer->fs_cbuf_len = const_buffer_len;
         }
         else {
            renderer->fs_cbuf_len = 0;
         }
      }
   }
}
Example #11
void renderer_set_constants(struct xorg_renderer *r,
                            int shader_type,
                            const float *params,
                            int param_bytes)
{
   struct pipe_constant_buffer *cbuf =
      (shader_type == PIPE_SHADER_VERTEX) ? &r->vs_const_buffer :
      &r->fs_const_buffer;

   pipe_buffer_reference(&cbuf->buffer, NULL);
   cbuf->buffer = pipe_buffer_create(r->pipe->screen, 16,
                                     PIPE_BUFFER_USAGE_CONSTANT,
                                     param_bytes);

   if (cbuf->buffer) {
      pipe_buffer_write(r->pipe->screen, cbuf->buffer,
                        0, param_bytes, params);
   }
   r->pipe->set_constant_buffer(r->pipe, shader_type, 0, cbuf);
}
Example #12
/* Set the transformation for this Graphics context. */
static int galSetTransform(IDevice *pContext, Matrix4x4F *pMatrix)
{
	GIDevice *icontext = NULL;
	if ((icontext = (GIDevice*)pContext) == NULL)
	{
		return GI_ERROR;
	}

	if ((pMatrix == NULL))
	{
		return GI_ERROR;
	}

	memcpy((void*)&icontext->transform, (void*)pMatrix, sizeof(Matrix4x4F));

	pipe_buffer_write(icontext->context, icontext->constant, 16, sizeof(icontext->transform), &icontext->transform);

	return GI_SUCCESS;

}
Example #13
/**
 * Set the model-view-projection matrix used by vertex shaders.
 */
static void renderer_set_mvp(struct renderer *renderer,
                             const struct matrix *mvp)
{
   struct matrix *cur = &renderer->mvp;
   struct pipe_resource *cbuf;
   VGfloat consts[3][4];
   VGint i;

   /* projection only */
   if (!mvp)
      mvp = &renderer->projection;

   /* re-upload only if necessary */
   if (memcmp(cur, mvp, sizeof(*mvp)) == 0)
      return;

   /* 3x3 matrix to 3 constant vectors (no Z) */
   for (i = 0; i < 3; i++) {
      consts[i][0] = mvp->m[i + 0];
      consts[i][1] = mvp->m[i + 3];
      consts[i][2] = 0.0f;
      consts[i][3] = mvp->m[i + 6];
   }

   cbuf = renderer->vs_cbuf;
   pipe_resource_reference(&cbuf, NULL);
   cbuf = pipe_buffer_create(renderer->pipe->screen,
                             PIPE_BIND_CONSTANT_BUFFER,
                             PIPE_USAGE_STATIC,
                             sizeof(consts));
   if (cbuf) {
      pipe_buffer_write(renderer->pipe, cbuf,
            0, sizeof(consts), consts);
   }
   pipe_set_constant_buffer(renderer->pipe,
         PIPE_SHADER_VERTEX, 0, cbuf);

   memcpy(cur, mvp, sizeof(*mvp));
   renderer->vs_cbuf = cbuf;
}
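For reference, the packing loop above produces three float4 constants laid out as follows (a sketch of the resulting values, not additional driver code):

/*
 * consts[0] = { m[0], m[3], 0.0f, m[6] }
 * consts[1] = { m[1], m[4], 0.0f, m[7] }
 * consts[2] = { m[2], m[5], 0.0f, m[8] }
 *
 * Each constant holds one row of the 3x3 matrix with a zero in the unused
 * Z slot, so the vertex shader can apply the transform with dot products.
 */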
Example #14
static struct pipe_resource *
create_solid_vertexbuf(struct pipe_context *pctx)
{
	static const float init_shader_const[] = {
			/* for clear/gmem2mem: */
			-1.000000, +1.000000, +1.000000, +1.100000,
			+1.000000, +1.000000, -1.000000, -1.100000,
			+1.000000, +1.100000, -1.100000, +1.000000,
			/* for mem2gmem: (vertices) */
			-1.000000, +1.000000, +1.000000, +1.000000,
			+1.000000, +1.000000, -1.000000, -1.000000,
			+1.000000, +1.000000, -1.000000, +1.000000,
			/* for mem2gmem: (tex coords) */
			+0.000000, +0.000000, +1.000000, +0.000000,
			+0.000000, +1.000000, +1.000000, +1.000000,
	};
	struct pipe_resource *prsc = pipe_buffer_create(pctx->screen,
			PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE, sizeof(init_shader_const));
	pipe_buffer_write(pctx, prsc, 0,
			sizeof(init_shader_const), init_shader_const);
	return prsc;
}
Example #15
/**
 * Replace data in a subrange of buffer object.  If the data range
 * specified by size + offset extends beyond the end of the buffer or
 * if data is NULL, no copy is performed.
 * Called via glBufferSubDataARB().
 */
static void
st_bufferobj_subdata(struct gl_context *ctx,
		     GLintptrARB offset,
		     GLsizeiptrARB size,
		     const GLvoid * data, struct gl_buffer_object *obj)
{
   struct st_buffer_object *st_obj = st_buffer_object(obj);

   /* we may be called from VBO code, so double-check params here */
   ASSERT(offset >= 0);
   ASSERT(size >= 0);
   ASSERT(offset + size <= obj->Size);

   if (!size)
      return;

   /*
    * According to ARB_vertex_buffer_object specification, if data is null,
    * then the contents of the buffer object's data store is undefined. We just
    * ignore, and leave it unchanged.
    */
   if (!data)
      return;

   if (!st_obj->buffer) {
      /* we probably ran out of memory during buffer allocation */
      return;
   }

   /* Now that transfers are per-context, we don't have to figure out
    * flushing here.  Usually drivers won't need to flush in this case
    * even if the buffer is currently referenced by hardware - they
    * just queue the upload as dma rather than mapping the underlying
    * buffer directly.
    */
   pipe_buffer_write(st_context(ctx)->pipe,
		     st_obj->buffer,
		     offset, size, data);
}
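A minimal caller-side sketch of the same pattern — overwriting a sub-range of an existing gallium buffer with pipe_buffer_write (the context, buffer, offset and data below are illustrative, not part of st_bufferobj_subdata):

   /* illustrative only: replace 16 floats starting at byte offset 256 */
   float update[16] = { 0.0f };
   pipe_buffer_write(pipe, buf, 256, sizeof(update), update);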
Example #16
static struct pipe_resource *
create_solid_vertexbuf(struct pipe_context *pctx)
{
	static const float init_shader_const[] = {
			/* for clear/gmem2mem/mem2gmem (vertices): */
			-1.000000, +1.000000, +1.000000,
			+1.000000, +1.000000, +1.000000,
			-1.000000, -1.000000, +1.000000,
			/* for mem2gmem: (tex coords) */
			+0.000000, +0.000000,
			+1.000000, +0.000000,
			+0.000000, +1.000000,
			/* SCREEN_SCISSOR_BR value (must be at 60 byte offset in page) */
			0.0,
			/* zero indices dummy draw workaround (3 16-bit zeros) */
			0.0, 0.0,
	};
	struct pipe_resource *prsc = pipe_buffer_create(pctx->screen,
			PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE, sizeof(init_shader_const));
	pipe_buffer_write(pctx, prsc, 0,
			sizeof(init_shader_const), init_shader_const);
	return prsc;
}
Example #17
static int galDrawImage(IDevice *pContext, ISurface *pSurface, unsigned x, unsigned y, unsigned sx, unsigned sy, unsigned width, unsigned height)
{
	GIDevice *icontext = NULL;
	if ((icontext = (GIDevice*)pContext) == NULL)
	{
		return GI_ERROR;
	}

	GISurface *isurface = NULL;
	if ((isurface = (GISurface*)pSurface) == NULL)
	{
		return GI_ERROR;
	}

	struct pipe_context *context;
	if ((context = icontext->context) == NULL)
	{
		return GI_ERROR;
	}

	struct pipe_sampler_view *view;
	if ((view = isurface->view) == NULL)
	{
		return GI_ERROR;
	}

	GIEdge edge[4];
	edge[0].x = x;
	edge[0].y = y;
	edge[0].r = 0.0f;
	edge[0].g = 0.0f;
	edge[0].b = 0.0f;
	edge[0].a = 0.0f;

	edge[1].x = (x + width);
	edge[1].y = (y);
	edge[1].r = 0.0f;
	edge[1].g = 0.0f;
	edge[1].b = 0.0f;
	edge[1].a = 0.0f;

	edge[2].x = (x + width);
	edge[2].y = (y + height);
	edge[2].r = 0.0f;
	edge[2].g = 0.0f;
	edge[2].b = 0.0f;
	edge[2].a = 0.0f;

	edge[3].x = (x);
	edge[3].y = (y + height);
	edge[3].r = 0.0f;
	edge[3].g = 0.0f;
	edge[3].b = 0.0f;
	edge[3].a = 0.0f;


	pipe_buffer_write(context, icontext->buffer, 0, sizeof(edge), edge);

	struct pipe_draw_info info;
	memset(&info, 0, sizeof(info));
	info.instance_count = 1;
	info.mode = PIPE_PRIM_QUADS;
	info.start = 0;
	info.count = 4;
	info.min_index = 0;
	info.max_index = info.count - 1;

	SetPixelShader(context, icontext->fs_image);

	context->set_fragment_sampler_views(context, 1, &view);

	context->draw_vbo(context, &info);

	SetPixelShader(context, NULL);

	return GI_SUCCESS;
}
Example #18
/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \return GL_TRUE for success, GL_FALSE if out of memory
 */
static GLboolean
st_bufferobj_data(struct gl_context *ctx,
		  GLenum target,
		  GLsizeiptrARB size,
		  const GLvoid * data,
		  GLenum usage,
                  GLbitfield storageFlags,
		  struct gl_buffer_object *obj)
{
   struct st_context *st = st_context(ctx);
   struct pipe_context *pipe = st->pipe;
   struct pipe_screen *screen = pipe->screen;
   struct st_buffer_object *st_obj = st_buffer_object(obj);
   unsigned bind, pipe_usage, pipe_flags = 0;

   if (target != GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD &&
       size && st_obj->buffer &&
       st_obj->Base.Size == size &&
       st_obj->Base.Usage == usage &&
       st_obj->Base.StorageFlags == storageFlags) {
      if (data) {
         /* Just discard the old contents and write new data.
          * This should be the same as creating a new buffer, but we avoid
          * a lot of validation in Mesa.
          */
         struct pipe_box box;

         u_box_1d(0, size, &box);
         pipe->transfer_inline_write(pipe, st_obj->buffer, 0,
                                    PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE,
                                    &box, data, 0, 0);
         return GL_TRUE;
      } else if (screen->get_param(screen, PIPE_CAP_INVALIDATE_BUFFER)) {
         pipe->invalidate_resource(pipe, st_obj->buffer);
         return GL_TRUE;
      }
   }

   st_obj->Base.Size = size;
   st_obj->Base.Usage = usage;
   st_obj->Base.StorageFlags = storageFlags;

   switch (target) {
   case GL_PIXEL_PACK_BUFFER_ARB:
   case GL_PIXEL_UNPACK_BUFFER_ARB:
      bind = PIPE_BIND_RENDER_TARGET | PIPE_BIND_SAMPLER_VIEW;
      break;
   case GL_ARRAY_BUFFER_ARB:
      bind = PIPE_BIND_VERTEX_BUFFER;
      break;
   case GL_ELEMENT_ARRAY_BUFFER_ARB:
      bind = PIPE_BIND_INDEX_BUFFER;
      break;
   case GL_TEXTURE_BUFFER:
      bind = PIPE_BIND_SAMPLER_VIEW;
      break;
   case GL_TRANSFORM_FEEDBACK_BUFFER:
      bind = PIPE_BIND_STREAM_OUTPUT;
      break;
   case GL_UNIFORM_BUFFER:
      bind = PIPE_BIND_CONSTANT_BUFFER;
      break;
   case GL_DRAW_INDIRECT_BUFFER:
   case GL_PARAMETER_BUFFER_ARB:
      bind = PIPE_BIND_COMMAND_ARGS_BUFFER;
      break;
   case GL_ATOMIC_COUNTER_BUFFER:
   case GL_SHADER_STORAGE_BUFFER:
      bind = PIPE_BIND_SHADER_BUFFER;
      break;
   case GL_QUERY_BUFFER:
      bind = PIPE_BIND_QUERY_BUFFER;
      break;
   default:
      bind = 0;
   }

   /* Set usage. */
   if (st_obj->Base.Immutable) {
      /* BufferStorage */
      if (storageFlags & GL_CLIENT_STORAGE_BIT)
         pipe_usage = PIPE_USAGE_STAGING;
      else
         pipe_usage = PIPE_USAGE_DEFAULT;
   }
   else {
      /* BufferData */
      switch (usage) {
      case GL_STATIC_DRAW:
      case GL_STATIC_COPY:
      default:
	 pipe_usage = PIPE_USAGE_DEFAULT;
         break;
      case GL_DYNAMIC_DRAW:
      case GL_DYNAMIC_COPY:
         pipe_usage = PIPE_USAGE_DYNAMIC;
         break;
      case GL_STREAM_DRAW:
      case GL_STREAM_COPY:
         /* XXX: Remove this test and fall-through when we have PBO unpacking
          * acceleration. Right now, PBO unpacking is done by the CPU, so we
          * have to make sure CPU reads are fast.
          */
         if (target != GL_PIXEL_UNPACK_BUFFER_ARB) {
            pipe_usage = PIPE_USAGE_STREAM;
            break;
         }
         /* fall through */
      case GL_STATIC_READ:
      case GL_DYNAMIC_READ:
      case GL_STREAM_READ:
         pipe_usage = PIPE_USAGE_STAGING;
         break;
      }
   }

   /* Set flags. */
   if (storageFlags & GL_MAP_PERSISTENT_BIT)
      pipe_flags |= PIPE_RESOURCE_FLAG_MAP_PERSISTENT;
   if (storageFlags & GL_MAP_COHERENT_BIT)
      pipe_flags |= PIPE_RESOURCE_FLAG_MAP_COHERENT;

   pipe_resource_reference( &st_obj->buffer, NULL );

   if (ST_DEBUG & DEBUG_BUFFER) {
      debug_printf("Create buffer size %" PRId64 " bind 0x%x\n",
                   (int64_t) size, bind);
   }

   if (size != 0) {
      struct pipe_resource buffer;

      memset(&buffer, 0, sizeof buffer);
      buffer.target = PIPE_BUFFER;
      buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
      buffer.bind = bind;
      buffer.usage = pipe_usage;
      buffer.flags = pipe_flags;
      buffer.width0 = size;
      buffer.height0 = 1;
      buffer.depth0 = 1;
      buffer.array_size = 1;

      if (target == GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD) {
         st_obj->buffer =
            screen->resource_from_user_memory(screen, &buffer, (void*)data);
      }
      else {
         st_obj->buffer = screen->resource_create(screen, &buffer);

         if (st_obj->buffer && data)
            pipe_buffer_write(pipe, st_obj->buffer, 0, size, data);
      }

      if (!st_obj->buffer) {
         /* out of memory */
         st_obj->Base.Size = 0;
         return GL_FALSE;
      }
   }

   /* BufferData may change an array or uniform buffer, need to update it */
   st->dirty.st |= ST_NEW_VERTEX_ARRAYS | ST_NEW_UNIFORM_BUFFER;

   return GL_TRUE;
}
Example #19
/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \return GL_TRUE for success, GL_FALSE if out of memory
 */
static GLboolean
st_bufferobj_data(struct gl_context *ctx,
		  GLenum target,
		  GLsizeiptrARB size,
		  const GLvoid * data,
		  GLenum usage, 
		  struct gl_buffer_object *obj)
{
   struct st_context *st = st_context(ctx);
   struct pipe_context *pipe = st->pipe;
   struct st_buffer_object *st_obj = st_buffer_object(obj);
   unsigned bind, pipe_usage;

   st_obj->Base.Size = size;
   st_obj->Base.Usage = usage;
   
   switch(target) {
   case GL_PIXEL_PACK_BUFFER_ARB:
   case GL_PIXEL_UNPACK_BUFFER_ARB:
      bind = PIPE_BIND_RENDER_TARGET | PIPE_BIND_SAMPLER_VIEW;
      break;
   case GL_ARRAY_BUFFER_ARB:
      bind = PIPE_BIND_VERTEX_BUFFER;
      break;
   case GL_ELEMENT_ARRAY_BUFFER_ARB:
      bind = PIPE_BIND_INDEX_BUFFER;
      break;
   case GL_TEXTURE_BUFFER:
      bind = PIPE_BIND_SAMPLER_VIEW;
      break;
   case GL_TRANSFORM_FEEDBACK_BUFFER:
      bind = PIPE_BIND_STREAM_OUTPUT;
      break;
   case GL_UNIFORM_BUFFER:
      bind = PIPE_BIND_CONSTANT_BUFFER;
      break;
   default:
      bind = 0;
   }

   switch (usage) {
   case GL_STATIC_DRAW:
   case GL_STATIC_READ:
   case GL_STATIC_COPY:
      pipe_usage = PIPE_USAGE_STATIC;
      break;
   case GL_DYNAMIC_DRAW:
   case GL_DYNAMIC_READ:
   case GL_DYNAMIC_COPY:
      pipe_usage = PIPE_USAGE_DYNAMIC;
      break;
   case GL_STREAM_DRAW:
   case GL_STREAM_READ:
   case GL_STREAM_COPY:
      pipe_usage = PIPE_USAGE_STREAM;
      break;
   default:
      pipe_usage = PIPE_USAGE_DEFAULT;
   }

   pipe_resource_reference( &st_obj->buffer, NULL );

   if (size != 0) {
      st_obj->buffer = pipe_buffer_create(pipe->screen, bind,
                                          pipe_usage, size);

      if (!st_obj->buffer) {
         /* out of memory */
         st_obj->Base.Size = 0;
         return GL_FALSE;
      }

      if (data)
         pipe_buffer_write(pipe, st_obj->buffer, 0, size, data);
      return GL_TRUE;
   }

   return GL_TRUE;
}
Example #20
static void init_prog(struct program *p)
{
	struct pipe_surface surf_tmpl;
	int ret;

	/* find a hardware device */
	ret = pipe_loader_probe(&p->dev, 1);
	assert(ret);

	/* init a pipe screen */
	p->screen = pipe_loader_create_screen(p->dev, PIPE_SEARCH_DIR);
	assert(p->screen);

	/* create the pipe driver context and cso context */
	p->pipe = p->screen->context_create(p->screen, NULL);
	p->cso = cso_create_context(p->pipe);

	/* set clear color */
	p->clear_color.f[0] = 0.3;
	p->clear_color.f[1] = 0.1;
	p->clear_color.f[2] = 0.3;
	p->clear_color.f[3] = 1.0;

	/* vertex buffer */
	{
		float vertices[4][2][4] = {
			{
				{ 0.9f, 0.9f, 0.0f, 1.0f },
				{ 1.0f, 1.0f, 0.0f, 1.0f }
			},
			{
				{ -0.9f, 0.9f, 0.0f, 1.0f },
				{  0.0f, 1.0f, 0.0f, 1.0f }
			},
			{
				{ -0.9f, -0.9f, 0.0f, 1.0f },
				{  0.0f,  0.0f, 1.0f, 1.0f }
			},
			{
				{ 0.9f, -0.9f, 0.0f, 1.0f },
				{ 1.0f,  0.0f, 1.0f, 1.0f }
			}
		};

		p->vbuf = pipe_buffer_create(p->screen, PIPE_BIND_VERTEX_BUFFER,
					     PIPE_USAGE_DEFAULT, sizeof(vertices));
		pipe_buffer_write(p->pipe, p->vbuf, 0, sizeof(vertices), vertices);
	}

	/* render target texture */
	{
		struct pipe_resource tmplt;
		memset(&tmplt, 0, sizeof(tmplt));
		tmplt.target = PIPE_TEXTURE_2D;
		tmplt.format = PIPE_FORMAT_B8G8R8A8_UNORM; /* All drivers support this */
		tmplt.width0 = WIDTH;
		tmplt.height0 = HEIGHT;
		tmplt.depth0 = 1;
		tmplt.array_size = 1;
		tmplt.last_level = 0;
		tmplt.bind = PIPE_BIND_RENDER_TARGET;

		p->target = p->screen->resource_create(p->screen, &tmplt);
	}

	/* sampler texture */
	{
		uint32_t *ptr;
		struct pipe_transfer *t;
		struct pipe_resource t_tmplt;
		struct pipe_sampler_view v_tmplt;
		struct pipe_box box;

		memset(&t_tmplt, 0, sizeof(t_tmplt));
		t_tmplt.target = PIPE_TEXTURE_2D;
		t_tmplt.format = PIPE_FORMAT_B8G8R8A8_UNORM; /* All drivers support this */
		t_tmplt.width0 = 2;
		t_tmplt.height0 = 2;
		t_tmplt.depth0 = 1;
		t_tmplt.array_size = 1;
		t_tmplt.last_level = 0;
		t_tmplt.bind = PIPE_BIND_RENDER_TARGET;

		p->tex = p->screen->resource_create(p->screen, &t_tmplt);

		memset(&box, 0, sizeof(box));
		box.width = 2;
		box.height = 2;
		box.depth = 1;

		ptr = p->pipe->transfer_map(p->pipe, p->tex, 0, PIPE_TRANSFER_WRITE, &box, &t);
		ptr[0] = 0xffff0000;
		ptr[1] = 0xff0000ff;
		ptr[2] = 0xff00ff00;
		ptr[3] = 0xffffff00;
		p->pipe->transfer_unmap(p->pipe, t);

		u_sampler_view_default_template(&v_tmplt, p->tex, p->tex->format);

		p->view = p->pipe->create_sampler_view(p->pipe, p->tex, &v_tmplt);
	}

	/* disabled blending/masking */
	memset(&p->blend, 0, sizeof(p->blend));
	p->blend.rt[0].colormask = PIPE_MASK_RGBA;

	/* no-op depth/stencil/alpha */
	memset(&p->depthstencil, 0, sizeof(p->depthstencil));

	/* rasterizer */
	memset(&p->rasterizer, 0, sizeof(p->rasterizer));
	p->rasterizer.cull_face = PIPE_FACE_NONE;
	p->rasterizer.half_pixel_center = 1;
	p->rasterizer.bottom_edge_rule = 1;
	p->rasterizer.depth_clip = 1;

	/* sampler */
	memset(&p->sampler, 0, sizeof(p->sampler));
	p->sampler.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
	p->sampler.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
	p->sampler.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
	p->sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
	p->sampler.min_img_filter = PIPE_TEX_FILTER_LINEAR;
	p->sampler.mag_img_filter = PIPE_TEX_FILTER_LINEAR;
	p->sampler.normalized_coords = 1;

	surf_tmpl.format = PIPE_FORMAT_B8G8R8A8_UNORM; /* All drivers support this */
	surf_tmpl.u.tex.level = 0;
	surf_tmpl.u.tex.first_layer = 0;
	surf_tmpl.u.tex.last_layer = 0;
	/* drawing destination */
	memset(&p->framebuffer, 0, sizeof(p->framebuffer));
	p->framebuffer.width = WIDTH;
	p->framebuffer.height = HEIGHT;
	p->framebuffer.nr_cbufs = 1;
	p->framebuffer.cbufs[0] = p->pipe->create_surface(p->pipe, p->target, &surf_tmpl);

	/* viewport, depth isn't really needed */
	{
		float x = 0;
		float y = 0;
		float z = FAR;
		float half_width = (float)WIDTH / 2.0f;
		float half_height = (float)HEIGHT / 2.0f;
		float half_depth = ((float)FAR - (float)NEAR) / 2.0f;
		float scale, bias;

		if (FLIP) {
			scale = -1.0f;
			bias = (float)HEIGHT;
		} else {
			scale = 1.0f;
			bias = 0.0f;
		}

		p->viewport.scale[0] = half_width;
		p->viewport.scale[1] = half_height * scale;
		p->viewport.scale[2] = half_depth;

		p->viewport.translate[0] = half_width + x;
		p->viewport.translate[1] = (half_height + y) * scale + bias;
		p->viewport.translate[2] = half_depth + z;
	}

	/* vertex elements state */
	memset(p->velem, 0, sizeof(p->velem));
	p->velem[0].src_offset = 0 * 4 * sizeof(float); /* offset 0, first element */
	p->velem[0].instance_divisor = 0;
	p->velem[0].vertex_buffer_index = 0;
	p->velem[0].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;

	p->velem[1].src_offset = 1 * 4 * sizeof(float); /* offset 16, second element */
	p->velem[1].instance_divisor = 0;
	p->velem[1].vertex_buffer_index = 0;
	p->velem[1].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;

	/* vertex shader */
	{
		const uint semantic_names[] = { TGSI_SEMANTIC_POSITION,
		                                TGSI_SEMANTIC_GENERIC };
		const uint semantic_indexes[] = { 0, 0 };
		p->vs = util_make_vertex_passthrough_shader(p->pipe, 2, semantic_names, semantic_indexes, FALSE);
	}

	/* fragment shader */
	p->fs = util_make_fragment_tex_shader(p->pipe, TGSI_TEXTURE_2D, TGSI_INTERPOLATE_LINEAR);
}
Example #21
/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \return GL_TRUE for success, GL_FALSE if out of memory
 */
static GLboolean
st_bufferobj_data(struct gl_context *ctx,
		  GLenum target,
		  GLsizeiptrARB size,
		  const GLvoid * data,
		  GLenum usage,
                  GLbitfield storageFlags,
		  struct gl_buffer_object *obj)
{
   struct st_context *st = st_context(ctx);
   struct pipe_context *pipe = st->pipe;
   struct st_buffer_object *st_obj = st_buffer_object(obj);
   unsigned bind, pipe_usage, pipe_flags = 0;

   if (size && data && st_obj->buffer &&
       st_obj->Base.Size == size &&
       st_obj->Base.Usage == usage &&
       st_obj->Base.StorageFlags == storageFlags) {
      /* Just discard the old contents and write new data.
       * This should be the same as creating a new buffer, but we avoid
       * a lot of validation in Mesa.
       */
      struct pipe_box box;

      u_box_1d(0, size, &box);
      pipe->transfer_inline_write(pipe, st_obj->buffer, 0,
                                  PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE,
                                  &box, data, 0, 0);
      return GL_TRUE;
   }

   st_obj->Base.Size = size;
   st_obj->Base.Usage = usage;
   st_obj->Base.StorageFlags = storageFlags;

   switch (target) {
   case GL_PIXEL_PACK_BUFFER_ARB:
   case GL_PIXEL_UNPACK_BUFFER_ARB:
      bind = PIPE_BIND_RENDER_TARGET | PIPE_BIND_SAMPLER_VIEW;
      break;
   case GL_ARRAY_BUFFER_ARB:
      bind = PIPE_BIND_VERTEX_BUFFER;
      break;
   case GL_ELEMENT_ARRAY_BUFFER_ARB:
      bind = PIPE_BIND_INDEX_BUFFER;
      break;
   case GL_TEXTURE_BUFFER:
      bind = PIPE_BIND_SAMPLER_VIEW;
      break;
   case GL_TRANSFORM_FEEDBACK_BUFFER:
      bind = PIPE_BIND_STREAM_OUTPUT;
      break;
   case GL_UNIFORM_BUFFER:
      bind = PIPE_BIND_CONSTANT_BUFFER;
      break;
   default:
      bind = 0;
   }

   /* Set usage. */
   if (st_obj->Base.Immutable) {
      /* BufferStorage */
      if (storageFlags & GL_CLIENT_STORAGE_BIT)
         pipe_usage = PIPE_USAGE_STAGING;
      else
         pipe_usage = PIPE_USAGE_DEFAULT;
   }
   else {
      /* BufferData */
      switch (usage) {
      case GL_STATIC_DRAW:
      case GL_STATIC_READ:
      case GL_STATIC_COPY:
      default:
	 pipe_usage = PIPE_USAGE_DEFAULT;
         break;
      case GL_DYNAMIC_DRAW:
      case GL_DYNAMIC_READ:
      case GL_DYNAMIC_COPY:
         pipe_usage = PIPE_USAGE_DYNAMIC;
         break;
      case GL_STREAM_DRAW:
      case GL_STREAM_READ:
      case GL_STREAM_COPY:
         pipe_usage = PIPE_USAGE_STREAM;
         break;
      }
   }

   /* Set flags. */
   if (storageFlags & GL_MAP_PERSISTENT_BIT)
      pipe_flags |= PIPE_RESOURCE_FLAG_MAP_PERSISTENT;
   if (storageFlags & GL_MAP_COHERENT_BIT)
      pipe_flags |= PIPE_RESOURCE_FLAG_MAP_COHERENT;

   pipe_resource_reference( &st_obj->buffer, NULL );

   if (ST_DEBUG & DEBUG_BUFFER) {
      debug_printf("Create buffer size %td bind 0x%x\n", size, bind);
   }

   if (size != 0) {
      struct pipe_resource buffer;

      memset(&buffer, 0, sizeof buffer);
      buffer.target = PIPE_BUFFER;
      buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
      buffer.bind = bind;
      buffer.usage = pipe_usage;
      buffer.flags = pipe_flags;
      buffer.width0 = size;
      buffer.height0 = 1;
      buffer.depth0 = 1;
      buffer.array_size = 1;

      st_obj->buffer = pipe->screen->resource_create(pipe->screen, &buffer);

      if (!st_obj->buffer) {
         /* out of memory */
         st_obj->Base.Size = 0;
         return GL_FALSE;
      }

      if (data)
         pipe_buffer_write(pipe, st_obj->buffer, 0, size, data);
   }

   /* BufferData may change an array or uniform buffer, need to update it */
   st->dirty.st |= ST_NEW_VERTEX_ARRAYS | ST_NEW_UNIFORM_BUFFER;

   return GL_TRUE;
}
Example #22
/**
 * Draw quad with texcoords and optional color.
 * Coords are gallium window coords with y=0=top.
 * \param color  may be null
 * \param invertTex  if true, flip texcoords vertically
 */
static void
draw_quad(struct gl_context *ctx, GLfloat x0, GLfloat y0, GLfloat z,
          GLfloat x1, GLfloat y1, const GLfloat *color,
          GLboolean invertTex, GLfloat maxXcoord, GLfloat maxYcoord)
{
   struct st_context *st = st_context(ctx);
   struct pipe_context *pipe = st->pipe;
   GLfloat verts[4][3][4]; /* four verts, three attribs, XYZW */

   /* setup vertex data */
   {
      const struct gl_framebuffer *fb = st->ctx->DrawBuffer;
      const GLfloat fb_width = (GLfloat) fb->Width;
      const GLfloat fb_height = (GLfloat) fb->Height;
      const GLfloat clip_x0 = x0 / fb_width * 2.0f - 1.0f;
      const GLfloat clip_y0 = y0 / fb_height * 2.0f - 1.0f;
      const GLfloat clip_x1 = x1 / fb_width * 2.0f - 1.0f;
      const GLfloat clip_y1 = y1 / fb_height * 2.0f - 1.0f;
      const GLfloat sLeft = 0.0f, sRight = maxXcoord;
      const GLfloat tTop = invertTex ? maxYcoord : 0.0f;
      const GLfloat tBot = invertTex ? 0.0f : maxYcoord;
      GLuint i;

      /* upper-left */
      verts[0][0][0] = clip_x0;    /* v[0].attr[0].x */
      verts[0][0][1] = clip_y0;    /* v[0].attr[0].y */

      /* upper-right */
      verts[1][0][0] = clip_x1;
      verts[1][0][1] = clip_y0;

      /* lower-right */
      verts[2][0][0] = clip_x1;
      verts[2][0][1] = clip_y1;

      /* lower-left */
      verts[3][0][0] = clip_x0;
      verts[3][0][1] = clip_y1;

      verts[0][1][0] = sLeft; /* v[0].attr[1].S */
      verts[0][1][1] = tTop;  /* v[0].attr[1].T */
      verts[1][1][0] = sRight;
      verts[1][1][1] = tTop;
      verts[2][1][0] = sRight;
      verts[2][1][1] = tBot;
      verts[3][1][0] = sLeft;
      verts[3][1][1] = tBot;

      /* same for all verts: */
      if (color) {
         for (i = 0; i < 4; i++) {
            verts[i][0][2] = z;         /* v[i].attr[0].z */
            verts[i][0][3] = 1.0f;      /* v[i].attr[0].w */
            verts[i][2][0] = color[0];  /* v[i].attr[2].r */
            verts[i][2][1] = color[1];  /* v[i].attr[2].g */
            verts[i][2][2] = color[2];  /* v[i].attr[2].b */
            verts[i][2][3] = color[3];  /* v[i].attr[2].a */
            verts[i][1][2] = 0.0f;      /* v[i].attr[1].R */
            verts[i][1][3] = 1.0f;      /* v[i].attr[1].Q */
         }
      }
      else {
         for (i = 0; i < 4; i++) {
            verts[i][0][2] = z;    /*Z*/
            verts[i][0][3] = 1.0f; /*W*/
            verts[i][1][2] = 0.0f; /*R*/
            verts[i][1][3] = 1.0f; /*Q*/
         }
      }
   }

   {
      struct pipe_resource *buf;

      /* allocate/load buffer object with vertex data */
      buf = pipe_buffer_create(pipe->screen,
			       PIPE_BIND_VERTEX_BUFFER,
			       PIPE_USAGE_STATIC,
                               sizeof(verts));
      pipe_buffer_write(st->pipe, buf, 0, sizeof(verts), verts);

      util_draw_vertex_buffer(pipe, st->cso_context, buf, 0,
                              PIPE_PRIM_QUADS,
                              4,  /* verts */
                              3); /* attribs/vert */
      pipe_resource_reference(&buf, NULL);
   }
}
Example #23
/** Initialize the internal details */
struct program *
pp_init_prog(struct pp_queue_t *ppq, struct pipe_screen *pscreen)
{

   struct program *p;

   pp_debug("Initializing program\n");
   if (!pscreen)
      return NULL;

   p = CALLOC(1, sizeof(struct program));
   if (!p)
      return NULL;

   p->screen = pscreen;
   p->pipe = pscreen->context_create(pscreen, NULL);
   p->cso = cso_create_context(p->pipe);

   {
      static const float verts[4][2][4] = {
         {
          {1.0f, 1.0f, 0.0f, 1.0f},
          {1.0f, 1.0f, 0.0f, 1.0f}
          },
         {
          {-1.0f, 1.0f, 0.0f, 1.0f},
          {0.0f, 1.0f, 0.0f, 1.0f}
          },
         {
          {-1.0f, -1.0f, 0.0f, 1.0f},
          {0.0f, 0.0f, 0.0f, 1.0f}
          },
         {
          {1.0f, -1.0f, 0.0f, 1.0f},
          {1.0f, 0.0f, 0.0f, 1.0f}
          }
      };

      p->vbuf = pipe_buffer_create(pscreen, PIPE_BIND_VERTEX_BUFFER,
                                   PIPE_USAGE_STATIC, sizeof(verts));
      pipe_buffer_write(p->pipe, p->vbuf, 0, sizeof(verts), verts);
   }

   p->blend.rt[0].colormask = PIPE_MASK_RGBA;
   p->blend.rt[0].rgb_src_factor = p->blend.rt[0].alpha_src_factor =
      PIPE_BLENDFACTOR_SRC_ALPHA;
   p->blend.rt[0].rgb_dst_factor = p->blend.rt[0].alpha_dst_factor =
      PIPE_BLENDFACTOR_INV_SRC_ALPHA;

   p->rasterizer.cull_face = PIPE_FACE_NONE;
   p->rasterizer.gl_rasterization_rules = 1;

   p->sampler.wrap_s = p->sampler.wrap_t = p->sampler.wrap_r =
      PIPE_TEX_WRAP_CLAMP_TO_EDGE;

   p->sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
   p->sampler.min_img_filter = p->sampler.mag_img_filter =
      PIPE_TEX_FILTER_LINEAR;
   p->sampler.normalized_coords = 1;

   p->sampler_point.wrap_s = p->sampler_point.wrap_t =
      p->sampler_point.wrap_r = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
   p->sampler_point.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
   p->sampler_point.min_img_filter = p->sampler_point.mag_img_filter =
      PIPE_TEX_FILTER_NEAREST;
   p->sampler_point.normalized_coords = 1;

   p->velem[0].src_offset = 0;
   p->velem[0].instance_divisor = 0;
   p->velem[0].vertex_buffer_index = 0;
   p->velem[0].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
   p->velem[1].src_offset = 1 * 4 * sizeof(float);
   p->velem[1].instance_divisor = 0;
   p->velem[1].vertex_buffer_index = 0;
   p->velem[1].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;

   if (!p->screen->is_format_supported(p->screen,
                                       PIPE_FORMAT_R32G32B32A32_FLOAT,
                                       PIPE_BUFFER, 1,
                                       PIPE_BIND_VERTEX_BUFFER))
      pp_debug("Vertex buf format fail\n");


   {
      const uint semantic_names[] = { TGSI_SEMANTIC_POSITION,
         TGSI_SEMANTIC_GENERIC
      };
      const uint semantic_indexes[] = { 0, 0 };
      p->passvs = util_make_vertex_passthrough_shader(p->pipe, 2,
                                                      semantic_names,
                                                      semantic_indexes);
   }

   p->framebuffer.nr_cbufs = 1;

   p->surf.usage = PIPE_BIND_RENDER_TARGET;
   p->surf.format = PIPE_FORMAT_B8G8R8A8_UNORM;

   p->pipe->set_sample_mask(p->pipe, ~0);

   return p;
}
Example #24
static void
st_StoreQueryResult(struct gl_context *ctx, struct gl_query_object *q,
                    struct gl_buffer_object *buf, intptr_t offset,
                    GLenum pname, GLenum ptype)
{
   struct pipe_context *pipe = st_context(ctx)->pipe;
   struct st_query_object *stq = st_query_object(q);
   struct st_buffer_object *stObj = st_buffer_object(buf);
   boolean wait = pname == GL_QUERY_RESULT;
   enum pipe_query_value_type result_type;
   int index;

   /* GL_QUERY_TARGET is a bit of an extension since it has nothing to
    * do with the GPU end of the query. Write it in "by hand".
    */
   if (pname == GL_QUERY_TARGET) {
      /* Assume that the data must be LE. The endianness situation wrt CPU and
       * GPU is incredibly confusing, but the vast majority of GPUs are
       * LE. When a BE one comes along, this needs some form of resolution.
       */
      unsigned data[2] = { CPU_TO_LE32(q->Target), 0 };
      pipe_buffer_write(pipe, stObj->buffer, offset,
                        (ptype == GL_INT64_ARB ||
                         ptype == GL_UNSIGNED_INT64_ARB) ? 8 : 4,
                        data);
      return;
   }

   switch (ptype) {
   case GL_INT:
      result_type = PIPE_QUERY_TYPE_I32;
      break;
   case GL_UNSIGNED_INT:
      result_type = PIPE_QUERY_TYPE_U32;
      break;
   case GL_INT64_ARB:
      result_type = PIPE_QUERY_TYPE_I64;
      break;
   case GL_UNSIGNED_INT64_ARB:
      result_type = PIPE_QUERY_TYPE_U64;
      break;
   default:
      unreachable("Unexpected result type");
   }

   if (pname == GL_QUERY_RESULT_AVAILABLE) {
      index = -1;
   } else if (stq->type == PIPE_QUERY_PIPELINE_STATISTICS) {
      switch (q->Target) {
      case GL_VERTICES_SUBMITTED_ARB:
         index = 0;
         break;
      case GL_PRIMITIVES_SUBMITTED_ARB:
         index = 1;
         break;
      case GL_VERTEX_SHADER_INVOCATIONS_ARB:
         index = 2;
         break;
      case GL_GEOMETRY_SHADER_INVOCATIONS:
         index = 3;
         break;
      case GL_GEOMETRY_SHADER_PRIMITIVES_EMITTED_ARB:
         index = 4;
         break;
      case GL_CLIPPING_INPUT_PRIMITIVES_ARB:
         index = 5;
         break;
      case GL_CLIPPING_OUTPUT_PRIMITIVES_ARB:
         index = 6;
         break;
      case GL_FRAGMENT_SHADER_INVOCATIONS_ARB:
         index = 7;
         break;
      case GL_TESS_CONTROL_SHADER_PATCHES_ARB:
         index = 8;
         break;
      case GL_TESS_EVALUATION_SHADER_INVOCATIONS_ARB:
         index = 9;
         break;
      case GL_COMPUTE_SHADER_INVOCATIONS_ARB:
         index = 10;
         break;
      default:
         unreachable("Unexpected target");
      }
   } else {
      index = 0;
   }

   pipe->get_query_result_resource(pipe, stq->pq, wait, result_type, index,
                                   stObj->buffer, offset);
}
Example #25
/* Draws a rectangle specified by a Rect4F structure. */
static int galDrawRectangle(IDevice *pContext, Rect4F bounds)
{
	GIDevice *icontext = NULL;
	if ((icontext = (GIDevice*)pContext) == NULL)
	{
		return GI_ERROR;
	}

	struct pipe_context *context;
	if ((context = icontext->context) == NULL)
	{
		return GI_ERROR;
	}

	GIEdge edge[4];
	edge[0].x = (bounds.x);
	edge[0].y = (bounds.y);
	edge[0].r = 1.0f;
	edge[0].g = 0.0f;
	edge[0].b = 0.0f;
	edge[0].a = 1.0f;

	edge[1].x = (bounds.x + bounds.width);
	edge[1].y = (bounds.y);
	edge[1].r = 0.0f;
	edge[1].g = 1.0f;
	edge[1].b = 0.0f;
	edge[1].a = 0.6f;

	edge[2].x = (bounds.x + bounds.width);
	edge[2].y = (bounds.y + bounds.height);
	edge[2].r = 0.0f;
	edge[2].g = 0.0f;
	edge[2].b = 1.0f;
	edge[2].a = 0.6f;

	edge[3].x = (bounds.x);
	edge[3].y = (bounds.y + bounds.height);
	edge[3].r = 1.0f;
	edge[3].g = 0.0f;
	edge[3].b = 0.0f;
	edge[3].a = 0.6f;


	pipe_buffer_write(context, icontext->buffer, 0, sizeof(edge), edge);

	struct pipe_draw_info info;
	memset(&info, 0, sizeof(info));
	info.instance_count = 1;
	info.mode = PIPE_PRIM_QUADS;
	info.start = 0;
	info.count = 4;
	info.min_index = 0;
	info.max_index = info.count - 1;

	SetPixelShader(context, icontext->fs_color);

	context->draw_vbo(context, &info);

	SetPixelShader(context, NULL);

	return GI_SUCCESS;
}
Example #26
/**
 * Allocate space for and store data in a buffer object.  Any data that was
 * previously stored in the buffer object is lost.  If data is NULL,
 * memory will be allocated, but no copy will occur.
 * Called via ctx->Driver.BufferData().
 * \return GL_TRUE for success, GL_FALSE if out of memory
 */
static GLboolean
st_bufferobj_data(struct gl_context *ctx,
		  GLenum target,
		  GLsizeiptrARB size,
		  const GLvoid * data,
		  GLenum usage,
		  struct gl_buffer_object *obj)
{
   struct st_context *st = st_context(ctx);
   struct pipe_context *pipe = st->pipe;
   struct st_buffer_object *st_obj = st_buffer_object(obj);
   unsigned bind, pipe_usage;

   if (size && data && st_obj->buffer &&
       st_obj->Base.Size == size && st_obj->Base.Usage == usage) {
      /* Just discard the old contents and write new data.
       * This should be the same as creating a new buffer, but we avoid
       * a lot of validation in Mesa.
       */
      struct pipe_box box;

      u_box_1d(0, size, &box);
      pipe->transfer_inline_write(pipe, st_obj->buffer, 0,
                                  PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE,
                                  &box, data, 0, 0);
      return GL_TRUE;
   }

   st_obj->Base.Size = size;
   st_obj->Base.Usage = usage;

   switch (target) {
   case GL_PIXEL_PACK_BUFFER_ARB:
   case GL_PIXEL_UNPACK_BUFFER_ARB:
      bind = PIPE_BIND_RENDER_TARGET | PIPE_BIND_SAMPLER_VIEW;
      break;
   case GL_ARRAY_BUFFER_ARB:
      bind = PIPE_BIND_VERTEX_BUFFER;
      break;
   case GL_ELEMENT_ARRAY_BUFFER_ARB:
      bind = PIPE_BIND_INDEX_BUFFER;
      break;
   case GL_TEXTURE_BUFFER:
      bind = PIPE_BIND_SAMPLER_VIEW;
      break;
   case GL_TRANSFORM_FEEDBACK_BUFFER:
      bind = PIPE_BIND_STREAM_OUTPUT;
      break;
   case GL_UNIFORM_BUFFER:
      bind = PIPE_BIND_CONSTANT_BUFFER;
      break;
   default:
      bind = 0;
   }

   switch (usage) {
   case GL_STATIC_DRAW:
   case GL_STATIC_READ:
   case GL_STATIC_COPY:
   default:
      pipe_usage = PIPE_USAGE_DEFAULT;
      break;
   case GL_DYNAMIC_DRAW:
   case GL_DYNAMIC_READ:
   case GL_DYNAMIC_COPY:
      pipe_usage = PIPE_USAGE_DYNAMIC;
      break;
   case GL_STREAM_DRAW:
   case GL_STREAM_READ:
   case GL_STREAM_COPY:
      pipe_usage = PIPE_USAGE_STREAM;
      break;
   }

   pipe_resource_reference( &st_obj->buffer, NULL );

   if (ST_DEBUG & DEBUG_BUFFER) {
      debug_printf("Create buffer size %td bind 0x%x\n", size, bind);
   }

   if (size != 0) {
      st_obj->buffer = pipe_buffer_create(pipe->screen, bind,
                                          pipe_usage, size);

      if (!st_obj->buffer) {
         /* out of memory */
         st_obj->Base.Size = 0;
         return GL_FALSE;
      }

      if (data)
         pipe_buffer_write(pipe, st_obj->buffer, 0, size, data);
   }

   /* BufferData may change an array or uniform buffer, need to update it */
   st->dirty.st |= ST_NEW_VERTEX_ARRAYS | ST_NEW_UNIFORM_BUFFER;

   return GL_TRUE;
}
Example #27
static void init_prog(struct program *p)
{
	struct pipe_surface surf_tmpl;
	int ret;

	/* find a hardware device */
	ret = pipe_loader_probe(&p->dev, 1);
	assert(ret);

	/* init a pipe screen */
	p->screen = pipe_loader_create_screen(p->dev, PIPE_SEARCH_DIR);
	assert(p->screen);

	/* create the pipe driver context and cso context */
	p->pipe = p->screen->context_create(p->screen, NULL);
	p->cso = cso_create_context(p->pipe);

	/* set clear color */
	p->clear_color.f[0] = 0.3;
	p->clear_color.f[1] = 0.1;
	p->clear_color.f[2] = 0.3;
	p->clear_color.f[3] = 1.0;

	/* vertex buffer */
	{
		float vertices[4][2][4] = {
			{
				{ 0.0f, -0.9f, 0.0f, 1.0f },
				{ 1.0f, 0.0f, 0.0f, 1.0f }
			},
			{
				{ -0.9f, 0.9f, 0.0f, 1.0f },
				{ 0.0f, 1.0f, 0.0f, 1.0f }
			},
			{
				{ 0.9f, 0.9f, 0.0f, 1.0f },
				{ 0.0f, 0.0f, 1.0f, 1.0f }
			}
		};

		p->vbuf = pipe_buffer_create(p->screen, PIPE_BIND_VERTEX_BUFFER,
					     PIPE_USAGE_STATIC, sizeof(vertices));
		pipe_buffer_write(p->pipe, p->vbuf, 0, sizeof(vertices), vertices);
	}

	/* render target texture */
	{
		struct pipe_resource tmplt;
		memset(&tmplt, 0, sizeof(tmplt));
		tmplt.target = PIPE_TEXTURE_2D;
		tmplt.format = PIPE_FORMAT_B8G8R8A8_UNORM; /* All drivers support this */
		tmplt.width0 = WIDTH;
		tmplt.height0 = HEIGHT;
		tmplt.depth0 = 1;
		tmplt.array_size = 1;
		tmplt.last_level = 0;
		tmplt.bind = PIPE_BIND_RENDER_TARGET;

		p->target = p->screen->resource_create(p->screen, &tmplt);
	}

	/* disabled blending/masking */
	memset(&p->blend, 0, sizeof(p->blend));
	p->blend.rt[0].colormask = PIPE_MASK_RGBA;

	/* no-op depth/stencil/alpha */
	memset(&p->depthstencil, 0, sizeof(p->depthstencil));

	/* rasterizer */
	memset(&p->rasterizer, 0, sizeof(p->rasterizer));
	p->rasterizer.cull_face = PIPE_FACE_NONE;
	p->rasterizer.gl_rasterization_rules = 1;
	p->rasterizer.depth_clip = 1;

	surf_tmpl.format = PIPE_FORMAT_B8G8R8A8_UNORM;
	surf_tmpl.usage = PIPE_BIND_RENDER_TARGET;
	surf_tmpl.u.tex.level = 0;
	surf_tmpl.u.tex.first_layer = 0;
	surf_tmpl.u.tex.last_layer = 0;
	/* drawing destination */
	memset(&p->framebuffer, 0, sizeof(p->framebuffer));
	p->framebuffer.width = WIDTH;
	p->framebuffer.height = HEIGHT;
	p->framebuffer.nr_cbufs = 1;
	p->framebuffer.cbufs[0] = p->pipe->create_surface(p->pipe, p->target, &surf_tmpl);

	/* viewport, depth isn't really needed */
	{
		float x = 0;
		float y = 0;
		float z = FAR;
		float half_width = (float)WIDTH / 2.0f;
		float half_height = (float)HEIGHT / 2.0f;
		float half_depth = ((float)FAR - (float)NEAR) / 2.0f;
		float scale, bias;

		if (FLIP) {
			scale = -1.0f;
			bias = (float)HEIGHT;
		} else {
			scale = 1.0f;
			bias = 0.0f;
		}

		p->viewport.scale[0] = half_width;
		p->viewport.scale[1] = half_height * scale;
		p->viewport.scale[2] = half_depth;
		p->viewport.scale[3] = 1.0f;

		p->viewport.translate[0] = half_width + x;
		p->viewport.translate[1] = (half_height + y) * scale + bias;
		p->viewport.translate[2] = half_depth + z;
		p->viewport.translate[3] = 0.0f;
	}

	/* vertex elements state */
	memset(p->velem, 0, sizeof(p->velem));
	p->velem[0].src_offset = 0 * 4 * sizeof(float); /* offset 0, first element */
	p->velem[0].instance_divisor = 0;
	p->velem[0].vertex_buffer_index = 0;
	p->velem[0].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;

	p->velem[1].src_offset = 1 * 4 * sizeof(float); /* offset 16, second element */
	p->velem[1].instance_divisor = 0;
	p->velem[1].vertex_buffer_index = 0;
	p->velem[1].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;

	/* vertex shader */
	{
			const uint semantic_names[] = { TGSI_SEMANTIC_POSITION,
							TGSI_SEMANTIC_COLOR };
			const uint semantic_indexes[] = { 0, 0 };
			p->vs = util_make_vertex_passthrough_shader(p->pipe, 2, semantic_names, semantic_indexes);
	}

	/* fragment shader */
	p->fs = util_make_fragment_passthrough_shader(p->pipe);
}
Example #28
static struct pipe_context *si_create_context(struct pipe_screen *screen,
                                              unsigned flags)
{
	struct si_context *sctx = CALLOC_STRUCT(si_context);
	struct si_screen* sscreen = (struct si_screen *)screen;
	struct radeon_winsys *ws = sscreen->ws;
	int shader, i;
	bool stop_exec_on_failure = (flags & PIPE_CONTEXT_LOSE_CONTEXT_ON_RESET) != 0;

	if (!sctx)
		return NULL;

	sctx->has_graphics = sscreen->info.chip_class == SI ||
			     !(flags & PIPE_CONTEXT_COMPUTE_ONLY);

	if (flags & PIPE_CONTEXT_DEBUG)
		sscreen->record_llvm_ir = true; /* racy but not critical */

	sctx->b.screen = screen; /* this must be set first */
	sctx->b.priv = NULL;
	sctx->b.destroy = si_destroy_context;
	sctx->screen = sscreen; /* Easy accessing of screen/winsys. */
	sctx->is_debug = (flags & PIPE_CONTEXT_DEBUG) != 0;

	slab_create_child(&sctx->pool_transfers, &sscreen->pool_transfers);
	slab_create_child(&sctx->pool_transfers_unsync, &sscreen->pool_transfers);

	sctx->ws = sscreen->ws;
	sctx->family = sscreen->info.family;
	sctx->chip_class = sscreen->info.chip_class;

	if (sscreen->info.has_gpu_reset_counter_query) {
		sctx->gpu_reset_counter =
			sctx->ws->query_value(sctx->ws, RADEON_GPU_RESET_COUNTER);
	}


	if (sctx->chip_class == CIK ||
	    sctx->chip_class == VI ||
	    sctx->chip_class == GFX9) {
		sctx->eop_bug_scratch = si_resource(
			pipe_buffer_create(&sscreen->b, 0, PIPE_USAGE_DEFAULT,
					   16 * sscreen->info.num_render_backends));
		if (!sctx->eop_bug_scratch)
			goto fail;
	}

	/* Initialize context allocators. */
	sctx->allocator_zeroed_memory =
		u_suballocator_create(&sctx->b, 128 * 1024,
				      0, PIPE_USAGE_DEFAULT,
				      SI_RESOURCE_FLAG_UNMAPPABLE |
				      SI_RESOURCE_FLAG_CLEAR, false);
	if (!sctx->allocator_zeroed_memory)
		goto fail;

	sctx->b.stream_uploader = u_upload_create(&sctx->b, 1024 * 1024,
						    0, PIPE_USAGE_STREAM,
						    SI_RESOURCE_FLAG_READ_ONLY);
	if (!sctx->b.stream_uploader)
		goto fail;

	sctx->cached_gtt_allocator = u_upload_create(&sctx->b, 16 * 1024,
						       0, PIPE_USAGE_STAGING, 0);
	if (!sctx->cached_gtt_allocator)
		goto fail;

	sctx->ctx = sctx->ws->ctx_create(sctx->ws);
	if (!sctx->ctx)
		goto fail;

	if (sscreen->info.num_sdma_rings && !(sscreen->debug_flags & DBG(NO_ASYNC_DMA))) {
		sctx->dma_cs = sctx->ws->cs_create(sctx->ctx, RING_DMA,
						   (void*)si_flush_dma_cs,
						   sctx, stop_exec_on_failure);
	}

	bool use_sdma_upload = sscreen->info.has_dedicated_vram && sctx->dma_cs;
	sctx->b.const_uploader = u_upload_create(&sctx->b, 256 * 1024,
						 0, PIPE_USAGE_DEFAULT,
						 SI_RESOURCE_FLAG_32BIT |
						 (use_sdma_upload ?
							  SI_RESOURCE_FLAG_UPLOAD_FLUSH_EXPLICIT_VIA_SDMA :
							  (sscreen->cpdma_prefetch_writes_memory ?
								   0 : SI_RESOURCE_FLAG_READ_ONLY)));
	if (!sctx->b.const_uploader)
		goto fail;

	if (use_sdma_upload)
		u_upload_enable_flush_explicit(sctx->b.const_uploader);

	sctx->gfx_cs = ws->cs_create(sctx->ctx,
				     sctx->has_graphics ? RING_GFX : RING_COMPUTE,
				     (void*)si_flush_gfx_cs, sctx, stop_exec_on_failure);

	/* Border colors. */
	sctx->border_color_table = malloc(SI_MAX_BORDER_COLORS *
					  sizeof(*sctx->border_color_table));
	if (!sctx->border_color_table)
		goto fail;

	sctx->border_color_buffer = si_resource(
		pipe_buffer_create(screen, 0, PIPE_USAGE_DEFAULT,
				   SI_MAX_BORDER_COLORS *
				   sizeof(*sctx->border_color_table)));
	if (!sctx->border_color_buffer)
		goto fail;

	sctx->border_color_map =
		ws->buffer_map(sctx->border_color_buffer->buf,
			       NULL, PIPE_TRANSFER_WRITE);
	if (!sctx->border_color_map)
		goto fail;

	/* Initialize context functions used by graphics and compute. */
	sctx->b.emit_string_marker = si_emit_string_marker;
	sctx->b.set_debug_callback = si_set_debug_callback;
	sctx->b.set_log_context = si_set_log_context;
	sctx->b.set_context_param = si_set_context_param;
	sctx->b.get_device_reset_status = si_get_reset_status;
	sctx->b.set_device_reset_callback = si_set_device_reset_callback;

	si_init_all_descriptors(sctx);
	si_init_buffer_functions(sctx);
	si_init_clear_functions(sctx);
	si_init_blit_functions(sctx);
	si_init_compute_functions(sctx);
	si_init_compute_blit_functions(sctx);
	si_init_debug_functions(sctx);
	si_init_fence_functions(sctx);
	si_init_state_compute_functions(sctx);

	if (sscreen->debug_flags & DBG(FORCE_DMA))
		sctx->b.resource_copy_region = sctx->dma_copy;

	/* Initialize graphics-only context functions. */
	if (sctx->has_graphics) {
		si_init_context_texture_functions(sctx);
		si_init_query_functions(sctx);
		si_init_msaa_functions(sctx);
		si_init_shader_functions(sctx);
		si_init_state_functions(sctx);
		si_init_streamout_functions(sctx);
		si_init_viewport_functions(sctx);

		sctx->blitter = util_blitter_create(&sctx->b);
		if (sctx->blitter == NULL)
			goto fail;
		sctx->blitter->skip_viewport_restore = true;

		si_init_draw_functions(sctx);
	}

	/* Initialize SDMA functions. */
	if (sctx->chip_class >= CIK)
		cik_init_sdma_functions(sctx);
	else
		si_init_dma_functions(sctx);

	sctx->sample_mask = 0xffff;

	/* Initialize multimedia functions. */
	if (sscreen->info.has_hw_decode) {
		sctx->b.create_video_codec = si_uvd_create_decoder;
		sctx->b.create_video_buffer = si_video_buffer_create;
	} else {
		sctx->b.create_video_codec = vl_create_decoder;
		sctx->b.create_video_buffer = vl_video_buffer_create;
	}

	if (sctx->chip_class >= GFX9) {
		sctx->wait_mem_scratch = si_resource(
			pipe_buffer_create(screen, 0, PIPE_USAGE_DEFAULT, 4));
		if (!sctx->wait_mem_scratch)
			goto fail;

		/* Initialize the memory. */
		si_cp_write_data(sctx, sctx->wait_mem_scratch, 0, 4,
				 V_370_MEM, V_370_ME, &sctx->wait_mem_number);
	}

	/* CIK cannot unbind a constant buffer (S_BUFFER_LOAD doesn't skip loads
	 * if NUM_RECORDS == 0). We need to use a dummy buffer instead. */
	if (sctx->chip_class == CIK) {
		sctx->null_const_buf.buffer =
			pipe_aligned_buffer_create(screen,
						   SI_RESOURCE_FLAG_32BIT,
						   PIPE_USAGE_DEFAULT, 16,
						   sctx->screen->info.tcc_cache_line_size);
		if (!sctx->null_const_buf.buffer)
			goto fail;
		sctx->null_const_buf.buffer_size = sctx->null_const_buf.buffer->width0;

		unsigned start_shader = sctx->has_graphics ? 0 :  PIPE_SHADER_COMPUTE;
		for (shader = start_shader; shader < SI_NUM_SHADERS; shader++) {
			for (i = 0; i < SI_NUM_CONST_BUFFERS; i++) {
				sctx->b.set_constant_buffer(&sctx->b, shader, i,
							      &sctx->null_const_buf);
			}
		}

		si_set_rw_buffer(sctx, SI_HS_CONST_DEFAULT_TESS_LEVELS,
				 &sctx->null_const_buf);
		si_set_rw_buffer(sctx, SI_VS_CONST_INSTANCE_DIVISORS,
				 &sctx->null_const_buf);
		si_set_rw_buffer(sctx, SI_VS_CONST_CLIP_PLANES,
				 &sctx->null_const_buf);
		si_set_rw_buffer(sctx, SI_PS_CONST_POLY_STIPPLE,
				 &sctx->null_const_buf);
		si_set_rw_buffer(sctx, SI_PS_CONST_SAMPLE_POSITIONS,
				 &sctx->null_const_buf);
	}

	uint64_t max_threads_per_block;
	screen->get_compute_param(screen, PIPE_SHADER_IR_TGSI,
				  PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK,
				  &max_threads_per_block);

	/* The maximum number of scratch waves. Scratch space isn't divided
	 * evenly between CUs. The number is only a function of the number of CUs.
	 * We can decrease the constant to decrease the scratch buffer size.
	 *
	 * sctx->scratch_waves must be >= the maximum possible size of
	 * 1 threadgroup, so that the hw doesn't hang from being unable
	 * to start any.
	 *
	 * The recommended value is 4 per CU at most. Higher numbers don't
	 * bring much benefit, but they still occupy chip resources (think
	 * async compute). I've seen ~2% performance difference between 4 and 32.
	 */
	sctx->scratch_waves = MAX2(32 * sscreen->info.num_good_compute_units,
				   max_threads_per_block / 64);

	si_init_compiler(sscreen, &sctx->compiler);

	/* Bindless handles. */
	sctx->tex_handles = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
						    _mesa_key_pointer_equal);
	sctx->img_handles = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
						    _mesa_key_pointer_equal);

	util_dynarray_init(&sctx->resident_tex_handles, NULL);
	util_dynarray_init(&sctx->resident_img_handles, NULL);
	util_dynarray_init(&sctx->resident_tex_needs_color_decompress, NULL);
	util_dynarray_init(&sctx->resident_img_needs_color_decompress, NULL);
	util_dynarray_init(&sctx->resident_tex_needs_depth_decompress, NULL);

	sctx->sample_pos_buffer =
		pipe_buffer_create(sctx->b.screen, 0, PIPE_USAGE_DEFAULT,
				   sizeof(sctx->sample_positions));
	pipe_buffer_write(&sctx->b, sctx->sample_pos_buffer, 0,
			  sizeof(sctx->sample_positions), &sctx->sample_positions);

	/* this must be last */
	si_begin_new_gfx_cs(sctx);

	if (sctx->chip_class == CIK) {
		/* Clear the NULL constant buffer, because loads should return zeros.
		 * Note that this forces CP DMA to be used, because clover deadlocks
		 * for some reason when the compute codepath is used.
		 */
		uint32_t clear_value = 0;
		si_clear_buffer(sctx, sctx->null_const_buf.buffer, 0,
				sctx->null_const_buf.buffer->width0,
				&clear_value, 4, SI_COHERENCY_SHADER, true);
	}
	return &sctx->b;
fail:
	fprintf(stderr, "radeonsi: Failed to create a context.\n");
	si_destroy_context(&sctx->b);
	return NULL;
}