Example #1
void intelWaitIrq( intelContextPtr intel, int seq )
{
   int ret;
      
   if (0)
      fprintf(stderr, "%s %d\n", __FUNCTION__, seq );

   /* Tell the kernel which interrupt sequence number to wait for,
    * then retry the ioctl if it is interrupted or told to try again.
    */
   intel->iw.irq_seq = seq;

   do {
      ret = drmCommandWrite( intel->driFd, DRM_I830_IRQ_WAIT,
			     &intel->iw, sizeof(intel->iw) );
   } while (ret == -EAGAIN || ret == -EINTR);

   if ( ret ) {
      fprintf( stderr, "%s: drmI830IrqWait: %d\n", __FUNCTION__, ret );
      if (0)
	 intel_dump_batchbuffer( intel->alloc.offset,
				 intel->alloc.ptr,
				 intel->alloc.size );
      exit(1);
   }
}
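A minimal sketch of the usual caller pattern: emit an interrupt-generating breadcrumb while the hardware lock is held, then block until the kernel reports that sequence number has been reached. The intelEmitIrqLocked() helper is an assumption here (in the classic i830/i915 DRI drivers it emits the IRQ and returns the new sequence number); substitute whatever emit routine your driver actually provides.

static void intelWaitForIdleExample( intelContextPtr intel )
{
   int seq;

   LOCK_HARDWARE( intel );
   seq = intelEmitIrqLocked( intel );   /* assumed helper, returns seq */
   UNLOCK_HARDWARE( intel );

   intelWaitIrq( intel, seq );          /* Example #1 above */
}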
Example #2
void intelFlushBatchLocked( intelContextPtr intel, 
			    GLboolean ignore_cliprects,
			    GLboolean refill,
			    GLboolean allow_unlock)
{
   drmI830BatchBuffer batch;

   assert(intel->locked);

   if (0)
      fprintf(stderr, "%s used %d of %d offset %x..%x refill %d (started in %s)\n",
	      __FUNCTION__, 
	      (intel->batch.size - intel->batch.space), 
	      intel->batch.size,
	      intel->batch.start_offset,
	      intel->batch.start_offset + 
	      (intel->batch.size - intel->batch.space), 
	      refill,
	      intel->batch.func);

   /* Throw away non-effective packets.  Won't work once we have
    * hardware contexts, which would preserve state changes beyond a
    * single buffer.
    */
   if (intel->numClipRects == 0 && !ignore_cliprects) {
      
      /* Without this yield, an application with no cliprects can hog
       * the hardware.  Without unlocking, the effect is much worse -
       * effectively a lock-out of other contexts.
       */
      if (allow_unlock) {
	 UNLOCK_HARDWARE( intel );
	 sched_yield();
	 LOCK_HARDWARE( intel );
      }

      /* Note that any state thought to have been emitted actually
       * hasn't:
       */
      intel->batch.ptr -= (intel->batch.size - intel->batch.space);
      intel->batch.space = intel->batch.size;
      intel->vtbl.lost_hardware( intel ); 
   }

   if (intel->batch.space != intel->batch.size) {

      if (intel->sarea->ctxOwner != intel->hHWContext) {
	 intel->perf_boxes |= I830_BOX_LOST_CONTEXT;
	 intel->sarea->ctxOwner = intel->hHWContext;
      }

      batch.start = intel->batch.start_offset;
      batch.used = intel->batch.size - intel->batch.space;
      batch.cliprects = intel->pClipRects;
      batch.num_cliprects = ignore_cliprects ? 0 : intel->numClipRects;
      batch.DR1 = 0;
      batch.DR4 = ((((GLuint)intel->drawX) & 0xffff) | 
		   (((GLuint)intel->drawY) << 16));
      
      if (intel->alloc.offset) {
	 /* Terminate the batch with MI_BATCH_BUFFER_END, padding with a
	  * zero dword when necessary so the final length stays qword
	  * aligned.
	  */
	 if ((batch.used & 0x4) == 0) {
	    ((int *)intel->batch.ptr)[0] = 0;
	    ((int *)intel->batch.ptr)[1] = MI_BATCH_BUFFER_END;
	    batch.used += 0x8;
	    intel->batch.ptr += 0x8;
	 }
	 else {
	    ((int *)intel->batch.ptr)[0] = MI_BATCH_BUFFER_END;
	    batch.used += 0x4;
	    intel->batch.ptr += 0x4;
	 }      
      }

      if (0)
 	 intel_dump_batchbuffer( batch.start,
				 (int *)(intel->batch.ptr - batch.used),
				 batch.used );

      intel->batch.start_offset += batch.used;
      intel->batch.size -= batch.used;

      if (intel->batch.size < 8) {
	 refill = GL_TRUE;
	 intel->batch.space = intel->batch.size = 0;
      }
      else {
	 intel->batch.size -= 8;
	 intel->batch.space = intel->batch.size;
      }


      assert(intel->batch.space >= 0);
      assert(batch.start >= intel->alloc.offset);
      assert(batch.start < intel->alloc.offset + intel->alloc.size);
      assert(batch.start + batch.used > intel->alloc.offset);
      assert(batch.start + batch.used <= 
	     intel->alloc.offset + intel->alloc.size);


      if (intel->alloc.offset) {
	 if (drmCommandWrite (intel->driFd, DRM_I830_BATCHBUFFER, &batch, 
			      sizeof(batch))) {
	    fprintf(stderr, "DRM_I830_BATCHBUFFER: %d\n",  -errno);
	    UNLOCK_HARDWARE(intel);
	    exit(1);
	 }
      } else {
	 drmI830CmdBuffer cmd;
	 cmd.buf = (char *)intel->alloc.ptr + batch.start;
	 cmd.sz = batch.used;
	 cmd.DR1 = batch.DR1;
	 cmd.DR4 = batch.DR4;
	 cmd.num_cliprects = batch.num_cliprects;
	 cmd.cliprects = batch.cliprects;
	 
	 if (drmCommandWrite (intel->driFd, DRM_I830_CMDBUFFER, &cmd, 
			      sizeof(cmd))) {
	    fprintf(stderr, "DRM_I830_CMDBUFFER: %d\n",  -errno);
	    UNLOCK_HARDWARE(intel);
	    exit(1);
	 }
      }	 

      
      age_intel(intel, intel->sarea->last_enqueue);

      /* FIXME: use hardware contexts to avoid 'losing' hardware after
       * each buffer flush.
       */
      if (intel->batch.contains_geometry) 
	 assert(intel->batch.last_emit_state == intel->batch.counter);

      intel->batch.counter++;
      intel->batch.contains_geometry = 0;
      intel->batch.func = 0;
      intel->vtbl.lost_hardware( intel );
   }

   if (refill)
      intelRefillBatchLocked( intel, allow_unlock );
}
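A minimal sketch of how this is typically driven from an unlocked path: take the hardware lock, flush whatever has accumulated, and ask for a refill so the batch pointers are valid again on return. The particular refill/allow_unlock combination shown is an assumption; the real callers in the driver pick these flags per call site.

static void intelFlushBatchExample( intelContextPtr intel )
{
   LOCK_HARDWARE( intel );
   intelFlushBatchLocked( intel,
                          GL_FALSE,   /* honour cliprects */
                          GL_TRUE,    /* refill the batch buffer */
                          GL_TRUE );  /* may drop the lock to yield */
   UNLOCK_HARDWARE( intel );
}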
Example #3
/* TODO: Push this whole function into bufmgr.
 */
static void
do_flush_locked(struct intel_batchbuffer *batch,
                GLuint used,
                GLboolean ignore_cliprects, GLboolean allow_unlock)
{
   GLuint *ptr;
   GLuint i;
   struct intel_context *intel = batch->intel;
   unsigned fenceFlags;
   struct _DriFenceObject *fo;

   driBOValidateList(batch->intel->driFd, &batch->list);

   /* Apply the relocations.  This nasty map indicates to me that the
    * whole task should be done internally by the memory manager, and
    * that dma buffers probably need to be pinned within agp space.
    */
   ptr = (GLuint *) driBOMap(batch->buffer, DRM_BO_FLAG_WRITE,
                             DRM_BO_HINT_ALLOW_UNFENCED_MAP);


   for (i = 0; i < batch->nr_relocs; i++) {
      struct buffer_reloc *r = &batch->reloc[i];

      ptr[r->offset / 4] = driBOOffset(r->buf) + r->delta;
   }

   if (INTEL_DEBUG & DEBUG_BATCH)
      intel_dump_batchbuffer(0, ptr, used);

   driBOUnmap(batch->buffer);
   batch->map = NULL;

   /* Throw away non-effective packets.  Won't work once we have
    * hardware contexts, which would preserve state changes beyond a
    * single buffer.
    */

   if (!(intel->numClipRects == 0 && !ignore_cliprects)) {
      intel_batch_ioctl(batch->intel,
                        driBOOffset(batch->buffer),
                        used, ignore_cliprects, allow_unlock);
   }


   /*
    * Kernel fencing. The flags tell the kernel that we've
    * programmed an MI_FLUSH.
    */
   
   fenceFlags = DRM_I915_FENCE_FLAG_FLUSHED;
   fo = driFenceBuffers(batch->intel->driFd,
			"Batch fence", fenceFlags);

   /*
    * User space fencing.
    */

   driBOFence(batch->buffer, fo);

   if (driFenceType(fo) == DRM_FENCE_TYPE_EXE) {

     /*
      * Oops. We only validated a batch buffer. This means we
      * didn't do any proper rendering. Discard this fence object.
      */

      driFenceUnReference(fo);
   } else {
      driFenceUnReference(batch->last_fence);
      batch->last_fence = fo;
      for (i = 0; i < batch->nr_relocs; i++) {
	struct buffer_reloc *r = &batch->reloc[i];
	driBOFence(r->buf, fo);
      }
   }

   if (intel->numClipRects == 0 && !ignore_cliprects) {
      if (allow_unlock) {
         UNLOCK_HARDWARE(intel);
         sched_yield();
         LOCK_HARDWARE(intel);
      }
      intel->vtbl.lost_hardware(intel);
   }
}
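A minimal sketch of a caller, modelled on an i915tex-style intel_batchbuffer_flush(): compute how many bytes were written, terminate the batch with MI_BATCH_BUFFER_END (padding to keep the length qword aligned, as in Example #2), and hand the result to do_flush_locked() under the hardware lock. The batch->map and batch->ptr fields and the MI_BATCH_BUFFER_END opcode follow that driver; treat the exact names and flag values as assumptions for other trees.

static void flush_example(struct intel_batchbuffer *batch)
{
   struct intel_context *intel = batch->intel;
   GLuint used = batch->ptr - batch->map;   /* bytes emitted so far */

   /* Terminate the batch; keep its length qword aligned. */
   if (used & 0x4) {
      ((GLuint *) batch->ptr)[0] = MI_BATCH_BUFFER_END;
      batch->ptr += 4;
      used += 4;
   }
   else {
      ((GLuint *) batch->ptr)[0] = 0;
      ((GLuint *) batch->ptr)[1] = MI_BATCH_BUFFER_END;
      batch->ptr += 8;
      used += 8;
   }

   LOCK_HARDWARE(intel);
   do_flush_locked(batch, used,
                   GL_FALSE,    /* honour cliprects */
                   GL_FALSE);   /* do not drop the lock */
   UNLOCK_HARDWARE(intel);
}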