Example #1
static GLboolean brwProgramStringNotify( GLcontext *ctx,
        GLenum target,
        struct gl_program *prog )
{
    struct brw_context *brw = brw_context(ctx);
    int i;

    if (target == GL_FRAGMENT_PROGRAM_ARB) {
        struct gl_fragment_program *fprog = (struct gl_fragment_program *) prog;
        struct brw_fragment_program *newFP = brw_fragment_program(fprog);
        const struct brw_fragment_program *curFP =
            brw_fragment_program_const(brw->fragment_program);

        if (fprog->FogOption) {
            _mesa_append_fog_code(ctx, fprog);
            fprog->FogOption = GL_NONE;
        }

        if (newFP == curFP)
            brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
        newFP->id = brw->program_id++;
        newFP->isGLSL = brw_wm_is_glsl(fprog);
    }
    else if (target == GL_VERTEX_PROGRAM_ARB) {
        struct gl_vertex_program *vprog = (struct gl_vertex_program *) prog;
        struct brw_vertex_program *newVP = brw_vertex_program(vprog);
        const struct brw_vertex_program *curVP =
            brw_vertex_program_const(brw->vertex_program);

        if (newVP == curVP)
            brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
        if (newVP->program.IsPositionInvariant) {
            _mesa_insert_mvp_code(ctx, &newVP->program);
        }
        newVP->id = brw->program_id++;

        /* Also tell tnl about it:
         */
        _tnl_program_string(ctx, target, prog);
    }

    /* Reject programs with subroutines, which are totally broken at the moment
     * (all program flows return when any program flow returns, and
     * the VS also hangs if a function call calls a function).
     *
     * See piglit glsl-{vs,fs}-functions-[23] tests.
     */
    for (i = 0; i < prog->NumInstructions; i++) {
        if (prog->Instructions[i].Opcode == OPCODE_CAL) {
            shader_error(ctx, prog,
                         "i965 driver doesn't yet support uninlined function "
                         "calls.  Move to using a single return statement at "
                         "the end of the function to work around it.");
            return GL_FALSE;
        }
    }

    return GL_TRUE;
}
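For context, a hook like brwProgramStringNotify only takes effect once the driver installs it in Mesa's dd_function_table. A minimal sketch of that registration, modeled on brw_program.c (the helper name brwInitProgramFuncs is illustrative; the actual entry point and the other hooks it installs vary between Mesa versions):

#include "main/dd.h"      /* struct dd_function_table */
#include "main/mtypes.h"  /* GLcontext, struct gl_program */

/* Sketch: point Mesa core at the driver hook so it is invoked whenever a
 * new ARB program string is specified.  Illustrative helper name. */
void
brwInitProgramFuncs(struct dd_function_table *functions)
{
   functions->ProgramStringNotify = brwProgramStringNotify;
}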
Example #2
static void brwProgramStringNotify( GLcontext *ctx,
				    GLenum target,
				    struct gl_program *prog )
{
   struct brw_context *brw = brw_context(ctx);

   if (target == GL_FRAGMENT_PROGRAM_ARB) {
      struct gl_fragment_program *fprog = (struct gl_fragment_program *) prog;
      struct brw_fragment_program *newFP = brw_fragment_program(fprog);
      const struct brw_fragment_program *curFP =
         brw_fragment_program_const(brw->fragment_program);

      if (fprog->FogOption) {
         _mesa_append_fog_code(ctx, fprog);
         fprog->FogOption = GL_NONE;
      }

      if (newFP == curFP)
	 brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
      newFP->id = brw->program_id++;      
      newFP->isGLSL = brw_wm_is_glsl(fprog);
   }
   else if (target == GL_VERTEX_PROGRAM_ARB) {
      struct gl_vertex_program *vprog = (struct gl_vertex_program *) prog;
      struct brw_vertex_program *newVP = brw_vertex_program(vprog);
      const struct brw_vertex_program *curVP =
         brw_vertex_program_const(brw->vertex_program);

      if (newVP == curVP)
	 brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
      if (newVP->program.IsPositionInvariant) {
	 _mesa_insert_mvp_code(ctx, &newVP->program);
      }
      newVP->id = brw->program_id++;      

      /* Also tell tnl about it:
       */
      _tnl_program_string(ctx, target, prog);
   }
}
Example #3
static void
gen6_upload_vs_push_constants(struct brw_context *brw)
{
   struct brw_stage_state *stage_state = &brw->vs.base;

   /* BRW_NEW_VERTEX_PROGRAM */
   const struct brw_vertex_program *vp =
      brw_vertex_program_const(brw->vertex_program);
   /* BRW_NEW_VS_PROG_DATA */
   const struct brw_stage_prog_data *prog_data = &brw->vs.prog_data->base.base;

   gen6_upload_push_constants(brw, &vp->program.Base, prog_data,
                              stage_state, AUB_TRACE_VS_CONSTANTS);

   if (brw->gen >= 7) {
      if (brw->gen == 7 && !brw->is_haswell && !brw->is_baytrail)
         gen7_emit_vs_workaround_flush(brw);

      gen7_upload_constant_state(brw, stage_state, true /* active */,
                                 _3DSTATE_CONSTANT_VS);
   }
}
Example #4
static GLboolean
brwProgramStringNotify(struct gl_context *ctx,
		       GLenum target,
		       struct gl_program *prog)
{
   struct brw_context *brw = brw_context(ctx);
   const struct brw_compiler *compiler = brw->intelScreen->compiler;

   switch (target) {
   case GL_FRAGMENT_PROGRAM_ARB: {
      struct gl_fragment_program *fprog = (struct gl_fragment_program *) prog;
      struct brw_fragment_program *newFP = brw_fragment_program(fprog);
      const struct brw_fragment_program *curFP =
         brw_fragment_program_const(brw->fragment_program);

      if (newFP == curFP)
	 brw->ctx.NewDriverState |= BRW_NEW_FRAGMENT_PROGRAM;
      newFP->id = get_new_program_id(brw->intelScreen);

      brw_add_texrect_params(prog);

      prog->nir = brw_create_nir(brw, NULL, prog, MESA_SHADER_FRAGMENT, true);

      brw_fs_precompile(ctx, NULL, prog);
      break;
   }
   case GL_VERTEX_PROGRAM_ARB: {
      struct gl_vertex_program *vprog = (struct gl_vertex_program *) prog;
      struct brw_vertex_program *newVP = brw_vertex_program(vprog);
      const struct brw_vertex_program *curVP =
         brw_vertex_program_const(brw->vertex_program);

      if (newVP == curVP)
	 brw->ctx.NewDriverState |= BRW_NEW_VERTEX_PROGRAM;
      if (newVP->program.IsPositionInvariant) {
	 _mesa_insert_mvp_code(ctx, &newVP->program);
      }
      newVP->id = get_new_program_id(brw->intelScreen);

      /* Also tell tnl about it:
       */
      _tnl_program_string(ctx, target, prog);

      brw_add_texrect_params(prog);

      prog->nir = brw_create_nir(brw, NULL, prog, MESA_SHADER_VERTEX,
                                 compiler->scalar_stage[MESA_SHADER_VERTEX]);

      brw_vs_precompile(ctx, NULL, prog);
      break;
   }
   default:
      /*
       * driver->ProgramStringNotify is only called for ARB programs, fixed
       * function vertex programs, and ir_to_mesa (which isn't used by the
       * i965 back-end).  Therefore, even after geometry shaders are added,
       * this function should only ever be called with a target of
       * GL_VERTEX_PROGRAM_ARB or GL_FRAGMENT_PROGRAM_ARB.
       */
      unreachable("Unexpected target in brwProgramStringNotify");
   }

   return true;
}
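The unreachable() in the default case is Mesa's macro from util/macros.h. A simplified sketch of the common definition (the real macro falls back to a plain assert on compilers without __builtin_unreachable):

#include <assert.h>

/* "str" is a non-NULL string literal, so assert(!str) always fails in
 * debug builds; __builtin_unreachable() then tells GCC/Clang that control
 * flow never reaches this point, silencing missing-return warnings. */
#define unreachable(str)       \
   do {                        \
      assert(!str);            \
      __builtin_unreachable(); \
   } while (0)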
Example #5
/* Upload a new set of constants.  Too much variability to go into the
 * cache mechanism, but maybe would benefit from a comparison against
 * the current uploaded set of constants.
 */
static void
brw_upload_constant_buffer(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   const struct brw_vertex_program *vp =
      brw_vertex_program_const(brw->vertex_program);
   const GLuint sz = brw->curbe.total_size;
   const GLuint bufsz = sz * 16 * sizeof(GLfloat);
   GLfloat *buf;
   GLuint i;
   gl_clip_plane *clip_planes;

   if (sz == 0) {
      brw->curbe.last_bufsz  = 0;
      goto emit;
   }

   buf = brw->curbe.next_buf;

   /* fragment shader constants */
   if (brw->curbe.wm_size) {
      GLuint offset = brw->curbe.wm_start * 16;

      /* copy float constants */
      for (i = 0; i < brw->wm.prog_data->nr_params; i++) {
	 buf[offset + i] = convert_param(brw->wm.prog_data->param_convert[i],
					 brw->wm.prog_data->param[i]);
      }
   }


   /* When using the old VS backend, the clipplanes are actually delivered to
    * both CLIP and VS units.  VS uses them to calculate the outcode bitmasks.
    *
    * When using the new VS backend, it is responsible for setting up its own
    * clipplane constants if it needs them.  This results in a slight waste
    * of curbe space, but the advantage is that the new VS backend can use its
    * general-purpose uniform layout code to store the clipplanes.
    */
   if (brw->curbe.clip_size) {
      GLuint offset = brw->curbe.clip_start * 16;
      GLuint j;

      /* If any planes are going this way, send them all this way:
       */
      for (i = 0; i < 6; i++) {
	 buf[offset + i * 4 + 0] = fixed_plane[i][0];
	 buf[offset + i * 4 + 1] = fixed_plane[i][1];
	 buf[offset + i * 4 + 2] = fixed_plane[i][2];
	 buf[offset + i * 4 + 3] = fixed_plane[i][3];
      }

      /* Clip planes: _NEW_TRANSFORM plus _NEW_PROJECTION to get to
       * clip-space:
       */
      clip_planes = brw_select_clip_planes(ctx);
      for (j = 0; j < MAX_CLIP_PLANES; j++) {
	 if (ctx->Transform.ClipPlanesEnabled & (1<<j)) {
	    buf[offset + i * 4 + 0] = clip_planes[j][0];
	    buf[offset + i * 4 + 1] = clip_planes[j][1];
	    buf[offset + i * 4 + 2] = clip_planes[j][2];
	    buf[offset + i * 4 + 3] = clip_planes[j][3];
	    i++;
	 }
      }
   }

   /* vertex shader constants */
   if (brw->curbe.vs_size) {
      GLuint offset = brw->curbe.vs_start * 16;
      GLuint nr = brw->vs.prog_data->nr_params / 4;

      if (brw->vs.prog_data->uses_new_param_layout) {
	 for (i = 0; i < brw->vs.prog_data->nr_params; i++) {
	    buf[offset + i] = *brw->vs.prog_data->param[i];
	 }
      } else {
	 /* Load the subset of push constants that will get used when
	  * we also have a pull constant buffer.
	  */
	 for (i = 0; i < vp->program.Base.Parameters->NumParameters; i++) {
	    if (brw->vs.constant_map[i] != -1) {
	       assert(brw->vs.constant_map[i] <= nr);
	       memcpy(buf + offset + brw->vs.constant_map[i] * 4,
		      vp->program.Base.Parameters->ParameterValues[i],
		      4 * sizeof(float));
	    }
	 }
      }
   }

   if (0) {
      for (i = 0; i < sz*16; i+=4) 
	 printf("curbe %d.%d: %f %f %f %f\n", i/8, i&4,
		buf[i+0], buf[i+1], buf[i+2], buf[i+3]);

      printf("last_buf %p buf %p sz %d/%d cmp %d\n",
	     brw->curbe.last_buf, buf,
	     bufsz, brw->curbe.last_bufsz,
	     brw->curbe.last_buf ? memcmp(buf, brw->curbe.last_buf, bufsz) : -1);
   }

   if (brw->curbe.curbe_bo != NULL &&
       bufsz == brw->curbe.last_bufsz &&
       memcmp(buf, brw->curbe.last_buf, bufsz) == 0) {
      /* constants have not changed */
   } else {
      /* Update the record of what our last set of constants was.  We
       * don't just flip the pointers because we don't fill in the
       * data in the padding between the entries.
       */
      memcpy(brw->curbe.last_buf, buf, bufsz);
      brw->curbe.last_bufsz = bufsz;

      if (brw->curbe.curbe_bo != NULL &&
	  brw->curbe.curbe_next_offset + bufsz > brw->curbe.curbe_bo->size)
      {
	 drm_intel_gem_bo_unmap_gtt(brw->curbe.curbe_bo);
	 drm_intel_bo_unreference(brw->curbe.curbe_bo);
	 brw->curbe.curbe_bo = NULL;
      }

      if (brw->curbe.curbe_bo == NULL) {
	 /* Allocate a single page for CURBE entries for this batchbuffer.
	  * They're generally around 64b.
	  */
	 brw->curbe.curbe_bo = drm_intel_bo_alloc(brw->intel.bufmgr, "CURBE",
						  4096, 1 << 6);
	 brw->curbe.curbe_next_offset = 0;
	 drm_intel_gem_bo_map_gtt(brw->curbe.curbe_bo);
	 assert(bufsz < 4096);
      }

      brw->curbe.curbe_offset = brw->curbe.curbe_next_offset;
      brw->curbe.curbe_next_offset += bufsz;
      brw->curbe.curbe_next_offset = ALIGN(brw->curbe.curbe_next_offset, 64);

      /* Copy data to the buffer:
       */
      memcpy(brw->curbe.curbe_bo->virtual + brw->curbe.curbe_offset,
	     buf,
	     bufsz);
   }

   /* Because this provokes an action (ie copy the constants into the
    * URB), it shouldn't be shortcircuited if identical to the
    * previous time - because eg. the urb destination may have
    * changed, or the urb contents different to last time.
    *
    * Note that the data referred to is actually copied internally,
    * not just used in place according to passed pointer.
    *
    * It appears that the CS unit takes care of using each available
    * URB entry (Const URB Entry == CURBE) in turn, and issuing
    * flushes as necessary when doublebuffering of CURBEs isn't
    * possible.
    */

emit:
   BEGIN_BATCH(2);
   if (brw->curbe.total_size == 0) {
      OUT_BATCH((CMD_CONST_BUFFER << 16) | (2 - 2));
      OUT_BATCH(0);
   } else {
      OUT_BATCH((CMD_CONST_BUFFER << 16) | (1 << 8) | (2 - 2));
      OUT_RELOC(brw->curbe.curbe_bo,
		I915_GEM_DOMAIN_INSTRUCTION, 0,
		(brw->curbe.total_size - 1) + brw->curbe.curbe_offset);
   }
   ADVANCE_BATCH();
}
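The curbe_next_offset bookkeeping above relies on the ALIGN helper to keep each CURBE allocation on a 64-byte boundary. A minimal sketch of the usual Mesa-style definition, which assumes a power-of-two alignment:

/* Round value up to the next multiple of alignment (a power of two),
 * e.g. ALIGN(100, 64) == 128 and ALIGN(64, 64) == 64. */
#define ALIGN(value, alignment) \
   (((value) + (alignment) - 1) & ~((alignment) - 1))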
Example #6
static GLboolean brwProgramStringNotify( struct gl_context *ctx,
                                         GLenum target,
                                         struct gl_program *prog )
{
   struct brw_context *brw = brw_context(ctx);
   int i;

   if (target == GL_FRAGMENT_PROGRAM_ARB) {
      struct gl_fragment_program *fprog = (struct gl_fragment_program *) prog;
      struct brw_fragment_program *newFP = brw_fragment_program(fprog);
      const struct brw_fragment_program *curFP =
         brw_fragment_program_const(brw->fragment_program);
      struct gl_shader_program *shader_program;

      if (newFP == curFP)
	 brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
      newFP->id = brw->program_id++;      

      /* Don't reject fragment shaders for their Mesa IR state when we're
       * using the new FS backend.
       */
      shader_program = _mesa_lookup_shader_program(ctx, prog->Id);
      if (shader_program
	  && shader_program->_LinkedShaders[MESA_SHADER_FRAGMENT]) {
	 return GL_TRUE;
      }
   }
   else if (target == GL_VERTEX_PROGRAM_ARB) {
      struct gl_vertex_program *vprog = (struct gl_vertex_program *) prog;
      struct brw_vertex_program *newVP = brw_vertex_program(vprog);
      const struct brw_vertex_program *curVP =
         brw_vertex_program_const(brw->vertex_program);

      if (newVP == curVP)
	 brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
      if (newVP->program.IsPositionInvariant) {
	 _mesa_insert_mvp_code(ctx, &newVP->program);
      }
      newVP->id = brw->program_id++;      

      /* Also tell tnl about it:
       */
      _tnl_program_string(ctx, target, prog);
   }

   /* Reject programs with subroutines, which are totally broken at the moment
    * (all program flows return when any program flow returns, and
    * the VS also hangs if a function call calls a function).
    *
    * See piglit glsl-{vs,fs}-functions-[23] tests.
    */
   for (i = 0; i < prog->NumInstructions; i++) {
      struct prog_instruction *inst = prog->Instructions + i;
      int r;

      if (prog->Instructions[i].Opcode == OPCODE_CAL) {
	 shader_error(ctx, prog,
		      "i965 driver doesn't yet support uninlined function "
		      "calls.  Move to using a single return statement at "
		      "the end of the function to work around it.\n");
	 return GL_FALSE;
      }

      if (prog->Instructions[i].Opcode == OPCODE_RET) {
	 shader_error(ctx, prog,
		      "i965 driver doesn't yet support \"return\" "
		      "from main().\n");
	 return GL_FALSE;
      }

      for (r = 0; r < _mesa_num_inst_src_regs(inst->Opcode); r++) {
	 if (prog->Instructions[i].SrcReg[r].RelAddr &&
	     prog->Instructions[i].SrcReg[r].File == PROGRAM_INPUT) {
	    shader_error(ctx, prog,
			 "Variable indexing of shader inputs unsupported\n");
	    return GL_FALSE;
	 }
      }

      if (target == GL_FRAGMENT_PROGRAM_ARB &&
	  prog->Instructions[i].DstReg.RelAddr &&
	  prog->Instructions[i].DstReg.File == PROGRAM_OUTPUT) {
	 shader_error(ctx, prog,
		      "Variable indexing of FS outputs unsupported\n");
	 return GL_FALSE;
      }
      if (target == GL_FRAGMENT_PROGRAM_ARB) {
	 if ((prog->Instructions[i].DstReg.RelAddr &&
	      prog->Instructions[i].DstReg.File == PROGRAM_TEMPORARY) ||
	     (prog->Instructions[i].SrcReg[0].RelAddr &&
	      prog->Instructions[i].SrcReg[0].File == PROGRAM_TEMPORARY) ||
	     (prog->Instructions[i].SrcReg[1].RelAddr &&
	      prog->Instructions[i].SrcReg[1].File == PROGRAM_TEMPORARY) ||
	     (prog->Instructions[i].SrcReg[2].RelAddr &&
	      prog->Instructions[i].SrcReg[2].File == PROGRAM_TEMPORARY)) {
	    shader_error(ctx, prog,
			 "Variable indexing of variable arrays in the FS "
			 "unsupported\n");
	    return GL_FALSE;
	 }
      }
   }

   return GL_TRUE;
}
Example #7
/* Upload a new set of constants.  Too much variability to go into the
 * cache mechanism, but maybe would benefit from a comparison against
 * the current uploaded set of constants.
 */
static void prepare_constant_buffer(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   const struct brw_vertex_program *vp =
      brw_vertex_program_const(brw->vertex_program);
   const GLuint sz = brw->curbe.total_size;
   const GLuint bufsz = sz * 16 * sizeof(GLfloat);
   GLfloat *buf;
   GLuint i;

   if (sz == 0) {
      brw->curbe.last_bufsz  = 0;
      return;
   }

   buf = brw->curbe.next_buf;

   /* fragment shader constants */
   if (brw->curbe.wm_size) {
      GLuint offset = brw->curbe.wm_start * 16;

      /* copy float constants */
      for (i = 0; i < brw->wm.prog_data->nr_params; i++) {
	 buf[offset + i] = convert_param(brw->wm.prog_data->param_convert[i],
					 *brw->wm.prog_data->param[i]);
      }
   }


   /* The clipplanes are actually delivered to both CLIP and VS units.
    * VS uses them to calculate the outcode bitmasks.
    */
   if (brw->curbe.clip_size) {
      GLuint offset = brw->curbe.clip_start * 16;
      GLuint j;

      /* If any planes are going this way, send them all this way:
       */
      for (i = 0; i < 6; i++) {
	 buf[offset + i * 4 + 0] = fixed_plane[i][0];
	 buf[offset + i * 4 + 1] = fixed_plane[i][1];
	 buf[offset + i * 4 + 2] = fixed_plane[i][2];
	 buf[offset + i * 4 + 3] = fixed_plane[i][3];
      }

      /* Clip planes: _NEW_TRANSFORM plus _NEW_PROJECTION to get to
       * clip-space:
       */
      assert(MAX_CLIP_PLANES == 6);
      for (j = 0; j < MAX_CLIP_PLANES; j++) {
	 if (ctx->Transform.ClipPlanesEnabled & (1<<j)) {
	    buf[offset + i * 4 + 0] = ctx->Transform._ClipUserPlane[j][0];
	    buf[offset + i * 4 + 1] = ctx->Transform._ClipUserPlane[j][1];
	    buf[offset + i * 4 + 2] = ctx->Transform._ClipUserPlane[j][2];
	    buf[offset + i * 4 + 3] = ctx->Transform._ClipUserPlane[j][3];
	    i++;
	 }
      }
   }

   /* vertex shader constants */
   if (brw->curbe.vs_size) {
      GLuint offset = brw->curbe.vs_start * 16;
      GLuint nr = brw->vs.prog_data->nr_params / 4;

      /* Load the subset of push constants that will get used when
       * we also have a pull constant buffer.
       */
      for (i = 0; i < vp->program.Base.Parameters->NumParameters; i++) {
	 if (brw->vs.constant_map[i] != -1) {
	    assert(brw->vs.constant_map[i] <= nr);
	    memcpy(buf + offset + brw->vs.constant_map[i] * 4,
		   vp->program.Base.Parameters->ParameterValues[i],
		   4 * sizeof(float));
	 }
      }
   }

   if (0) {
      for (i = 0; i < sz*16; i+=4) 
	 printf("curbe %d.%d: %f %f %f %f\n", i/8, i&4,
		buf[i+0], buf[i+1], buf[i+2], buf[i+3]);

      printf("last_buf %p buf %p sz %d/%d cmp %d\n",
	     brw->curbe.last_buf, buf,
	     bufsz, brw->curbe.last_bufsz,
	     brw->curbe.last_buf ? memcmp(buf, brw->curbe.last_buf, bufsz) : -1);
   }

   if (brw->curbe.curbe_bo != NULL &&
       bufsz == brw->curbe.last_bufsz &&
       memcmp(buf, brw->curbe.last_buf, bufsz) == 0) {
      /* constants have not changed */
   } else {
      /* Update the record of what our last set of constants was.  We
       * don't just flip the pointers because we don't fill in the
       * data in the padding between the entries.
       */
      memcpy(brw->curbe.last_buf, buf, bufsz);
      brw->curbe.last_bufsz = bufsz;

      if (brw->curbe.curbe_bo != NULL &&
	  brw->curbe.curbe_next_offset + bufsz > brw->curbe.curbe_bo->size)
      {
	 drm_intel_gem_bo_unmap_gtt(brw->curbe.curbe_bo);
	 drm_intel_bo_unreference(brw->curbe.curbe_bo);
	 brw->curbe.curbe_bo = NULL;
      }

      if (brw->curbe.curbe_bo == NULL) {
	 /* Allocate a single page for CURBE entries for this batchbuffer.
	  * They're generally around 64b.
	  */
	 brw->curbe.curbe_bo = drm_intel_bo_alloc(brw->intel.bufmgr, "CURBE",
						  4096, 1 << 6);
	 brw->curbe.curbe_next_offset = 0;
	 drm_intel_gem_bo_map_gtt(brw->curbe.curbe_bo);
	 assert(bufsz < 4096);
      }

      brw->curbe.curbe_offset = brw->curbe.curbe_next_offset;
      brw->curbe.curbe_next_offset += bufsz;
      brw->curbe.curbe_next_offset = ALIGN(brw->curbe.curbe_next_offset, 64);

      /* Copy data to the buffer:
       */
      memcpy(brw->curbe.curbe_bo->virtual + brw->curbe.curbe_offset,
	     buf,
	     bufsz);
   }

   brw_add_validated_bo(brw, brw->curbe.curbe_bo);

   /* Because this provokes an action (ie copy the constants into the
    * URB), it shouldn't be shortcircuited if identical to the
    * previous time - because eg. the urb destination may have
    * changed, or the urb contents different to last time.
    *
    * Note that the data referred to is actually copied internally,
    * not just used in place according to passed pointer.
    *
    * It appears that the CS unit takes care of using each available
    * URB entry (Const URB Entry == CURBE) in turn, and issuing
    * flushes as necessary when doublebuffering of CURBEs isn't
    * possible.
    */
}
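Examples #5 and #7 both route fragment-shader parameters through convert_param(), which stores integer and boolean uniform values as raw bit patterns inside the float-typed CURBE slots. A sketch of the idea; the enum tags below are illustrative, and note that Example #5 passes a pointer while Example #7 passes a dereferenced value, so the real signature evidently changed between Mesa versions:

#include <stdint.h>

/* Illustrative conversion tags; Mesa's real enum lives in brw_wm.h. */
enum param_conversion {
   PARAM_NO_CONVERT,
   PARAM_CONVERT_F2I,   /* bit-cast (int32_t)param into the float slot */
   PARAM_CONVERT_F2U,   /* bit-cast (uint32_t)param into the float slot */
   PARAM_CONVERT_F2B,   /* store integer 1 or 0 bits for booleans */
};

static float
convert_param(enum param_conversion conversion, float param)
{
   union { float f; uint32_t u; int32_t i; } fi;

   switch (conversion) {
   case PARAM_CONVERT_F2I: fi.i = (int32_t)param; break;
   case PARAM_CONVERT_F2U: fi.u = (uint32_t)param; break;
   case PARAM_CONVERT_F2B: fi.i = (param != 0.0f); break;
   case PARAM_NO_CONVERT:
   default:
      return param;
   }
   return fi.f;
}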
Example #8
static void
upload_vs_state(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   GLcontext *ctx = &intel->ctx;
   const struct brw_vertex_program *vp =
      brw_vertex_program_const(brw->vertex_program);
   unsigned int nr_params = vp->program.Base.Parameters->NumParameters;
   drm_intel_bo *constant_bo;
   int i;

   if (vp->use_const_buffer || nr_params == 0) {
      /* Disable the push constant buffers. */
      BEGIN_BATCH(5);
      OUT_BATCH(CMD_3D_CONSTANT_VS_STATE << 16 | (5 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else {
      if (brw->vertex_program->IsNVProgram)
	 _mesa_load_tracked_matrices(ctx);

      /* Updates the ParameterValues[i] pointers for all parameters of the
       * basic type of PROGRAM_STATE_VAR.
       */
      _mesa_load_state_parameters(ctx, vp->program.Base.Parameters);

      constant_bo = drm_intel_bo_alloc(intel->bufmgr, "VS constant_bo",
				       nr_params * 4 * sizeof(float),
				       4096);
      drm_intel_gem_bo_map_gtt(constant_bo);
      for (i = 0; i < nr_params; i++) {
	 memcpy((char *)constant_bo->virtual + i * 4 * sizeof(float),
		vp->program.Base.Parameters->ParameterValues[i],
		4 * sizeof(float));
      }
      drm_intel_gem_bo_unmap_gtt(constant_bo);

      BEGIN_BATCH(5);
      OUT_BATCH(CMD_3D_CONSTANT_VS_STATE << 16 |
		GEN6_CONSTANT_BUFFER_0_ENABLE |
		(5 - 2));
      OUT_RELOC(constant_bo,
		I915_GEM_DOMAIN_RENDER, 0, /* XXX: bad domain */
		ALIGN(nr_params, 2) / 2 - 1);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();

      drm_intel_bo_unreference(constant_bo);
   }

   intel_batchbuffer_emit_mi_flush(intel->batch);

   BEGIN_BATCH(6);
   OUT_BATCH(CMD_3D_VS_STATE << 16 | (6 - 2));
   OUT_RELOC(brw->vs.prog_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   OUT_BATCH((0 << GEN6_VS_SAMPLER_COUNT_SHIFT) |
	     (brw->vs.nr_surfaces << GEN6_VS_BINDING_TABLE_ENTRY_COUNT_SHIFT));
   OUT_BATCH(0); /* scratch space base offset */
   OUT_BATCH((1 << GEN6_VS_DISPATCH_START_GRF_SHIFT) |
	     (brw->vs.prog_data->urb_read_length << GEN6_VS_URB_READ_LENGTH_SHIFT) |
	     (0 << GEN6_VS_URB_ENTRY_READ_OFFSET_SHIFT));
   OUT_BATCH((0 << GEN6_VS_MAX_THREADS_SHIFT) |
	     GEN6_VS_STATISTICS_ENABLE);
   ADVANCE_BATCH();

   intel_batchbuffer_emit_mi_flush(intel->batch);
}