Example #1
static void
vmw_swc_destroy(struct svga_winsys_context *swc)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   unsigned i;

   for(i = 0; i < vswc->surface.used; ++i) {
      struct vmw_ctx_validate_item *isurf = &vswc->surface.items[i];
      if (isurf->referenced)
         p_atomic_dec(&isurf->vsurf->validated);
      vmw_svga_winsys_surface_reference(&isurf->vsurf, NULL);
   }

   for(i = 0; i < vswc->shader.used; ++i) {
      struct vmw_ctx_validate_item *ishader = &vswc->shader.items[i];
      if (ishader->referenced)
         p_atomic_dec(&ishader->vshader->validated);
      vmw_svga_winsys_shader_reference(&ishader->vshader, NULL);
   }

   util_hash_table_destroy(vswc->hash);
   pb_validate_destroy(vswc->validate);
   vmw_ioctl_context_destroy(vswc->vws, swc->cid);
#ifdef DEBUG
   debug_flush_ctx_destroy(vswc->fctx);
#endif
   FREE(vswc);
}
Example #2
void radeon_drm_cs_emit_ioctl_oneshot(void *job, int thread_index)
{
    struct radeon_cs_context *csc = ((struct radeon_drm_cs*)job)->cst;
    unsigned i;
    int r;

    r = drmCommandWriteRead(csc->fd, DRM_RADEON_CS,
                            &csc->cs, sizeof(struct drm_radeon_cs));
    if (r) {
        if (r == -ENOMEM)
            fprintf(stderr, "radeon: Not enough memory for command submission.\n");
        else if (debug_get_bool_option("RADEON_DUMP_CS", false)) {
            unsigned i;

            fprintf(stderr, "radeon: The kernel rejected CS, dumping...\n");
            for (i = 0; i < csc->chunks[0].length_dw; i++) {
                fprintf(stderr, "0x%08X\n", csc->buf[i]);
            }
        } else {
            fprintf(stderr, "radeon: The kernel rejected CS, "
                    "see dmesg for more information (%i).\n", r);
        }
    }

    for (i = 0; i < csc->num_relocs; i++)
        p_atomic_dec(&csc->relocs_bo[i].bo->num_active_ioctls);
    for (i = 0; i < csc->num_slab_buffers; i++)
        p_atomic_dec(&csc->slab_buffers[i].bo->num_active_ioctls);

    radeon_cs_context_cleanup(csc);
}
Example #3
void radeon_drm_cs_emit_ioctl_oneshot(struct radeon_drm_cs *cs, struct radeon_cs_context *csc)
{
    unsigned i;

    if (drmCommandWriteRead(csc->fd, DRM_RADEON_CS,
                            &csc->cs, sizeof(struct drm_radeon_cs))) {
        if (debug_get_bool_option("RADEON_DUMP_CS", FALSE)) {
            unsigned i;

            fprintf(stderr, "radeon: The kernel rejected CS, dumping...\n");
            for (i = 0; i < csc->chunks[0].length_dw; i++) {
                fprintf(stderr, "0x%08X\n", csc->buf[i]);
            }
        } else {
            fprintf(stderr, "radeon: The kernel rejected CS, "
                    "see dmesg for more information.\n");
        }
    }

    if (cs->trace_buf) {
        radeon_dump_cs_on_lockup(cs, csc);
    }

    for (i = 0; i < csc->crelocs; i++)
        p_atomic_dec(&csc->relocs_bo[i]->num_active_ioctls);

    radeon_cs_context_cleanup(csc);
}
Example #4
static boolean radeon_drm_cs_validate(struct radeon_winsys_cs *rcs)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    boolean status =
        cs->csc->used_gart < cs->ws->info.gart_size * 0.8 &&
        cs->csc->used_vram < cs->ws->info.vram_size * 0.8;

    if (status) {
        cs->csc->validated_crelocs = cs->csc->crelocs;
    } else {
        /* Remove lately-added relocations. The validation failed with them
         * and the CS is about to be flushed because of that. Keep only
         * the already-validated relocations. */
        unsigned i;

        for (i = cs->csc->validated_crelocs; i < cs->csc->crelocs; i++) {
            p_atomic_dec(&cs->csc->relocs_bo[i]->num_cs_references);
            radeon_bo_reference(&cs->csc->relocs_bo[i], NULL);
        }
        cs->csc->crelocs = cs->csc->validated_crelocs;

        /* Flush if there are any relocs. Clean up otherwise. */
        if (cs->csc->crelocs) {
            cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC);
        } else {
            radeon_cs_context_cleanup(cs->csc);

            assert(cs->base.cdw == 0);
            if (cs->base.cdw != 0) {
                fprintf(stderr, "radeon: Unexpected error in %s.\n", __func__);
            }
        }
    }
    return status;
}
Example #5
static void amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   amdgpu_destroy_cs_context(cs);
   p_atomic_dec(&cs->ctx->ws->num_cs);
   pb_reference(&cs->big_ib_buffer, NULL);
   FREE(cs);
}
Example #6
static void virgl_vtest_release_all_res(struct virgl_vtest_winsys *vtws,
                                        struct virgl_vtest_cmd_buf *cbuf)
{
   int i;

   for (i = 0; i < cbuf->cres; i++) {
      p_atomic_dec(&cbuf->res_bo[i]->num_cs_references);
      virgl_vtest_resource_reference(vtws, &cbuf->res_bo[i], NULL);
   }
   cbuf->cres = 0;
}
Example #7
static void radeon_drm_cs_destroy(struct radeon_winsys_cs *rcs)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);

    radeon_drm_cs_sync_flush(rcs);
    pipe_semaphore_destroy(&cs->flush_completed);
    radeon_cs_context_cleanup(&cs->csc1);
    radeon_cs_context_cleanup(&cs->csc2);
    p_atomic_dec(&cs->ws->num_cs);
    radeon_destroy_cs_context(&cs->csc1);
    radeon_destroy_cs_context(&cs->csc2);
    FREE(cs);
}
Example #8
/**
 * vmw_swc_surface_clear_reference - Clear referenced info for a surface
 *
 * @swc:   Pointer to an svga_winsys_context
 * @vsurf: Pointer to a vmw_svga_winsys_surface, the referenced info of which
 *         we want to clear
 *
 * This is primarily used by a discard surface map to indicate that the
 * surface data is no longer referenced by a draw call, and mapping it
 * should therefore no longer cause a flush.
 */
void
vmw_swc_surface_clear_reference(struct svga_winsys_context *swc,
                                struct vmw_svga_winsys_surface *vsurf)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_ctx_validate_item *isrf =
      util_hash_table_get(vswc->hash, vsurf);

   if (isrf && isrf->referenced) {
      isrf->referenced = FALSE;
      p_atomic_dec(&vsurf->validated);
   }
}
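
The doc comment above describes the intended call site: when a surface is mapped with discard semantics, its previous contents are dead, so clearing the referenced state first means the map no longer has to force a flush. Below is a minimal, self-contained sketch of that interaction; demo_surface, demo_clear_reference and demo_map_needs_flush are hypothetical stand-ins for the vmw winsys types and entry points, not real Mesa API.

#include <stdbool.h>
#include "util/u_atomic.h"

/* Hypothetical stand-in for vmw_svga_winsys_surface: "validated" counts
 * pending command-buffer validations, "referenced" records whether a draw
 * call has used the surface since the last flush. */
struct demo_surface {
   int validated;
   bool referenced;
};

/* Mirrors vmw_swc_surface_clear_reference(): forget the draw-call
 * reference and drop the matching validation count. */
static void
demo_clear_reference(struct demo_surface *surf)
{
   if (surf->referenced) {
      surf->referenced = false;
      p_atomic_dec(&surf->validated);
   }
}

/* A discard map may throw the old contents away, so it clears the
 * reference first; a flush is only needed if the surface is still
 * validated by other pending work. */
static bool
demo_map_needs_flush(struct demo_surface *surf, bool discard)
{
   if (discard)
      demo_clear_reference(surf);
   return p_atomic_read(&surf->validated) != 0;
}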
Example #9
static void radeon_drm_cs_destroy(struct radeon_cmdbuf *rcs)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);

    radeon_drm_cs_sync_flush(rcs);
    util_queue_fence_destroy(&cs->flush_completed);
    radeon_cs_context_cleanup(&cs->csc1);
    radeon_cs_context_cleanup(&cs->csc2);
    p_atomic_dec(&cs->ws->num_cs);
    radeon_destroy_cs_context(&cs->csc1);
    radeon_destroy_cs_context(&cs->csc2);
    radeon_fence_reference(&cs->next_fence, NULL);
    FREE(cs);
}
Example #10
static void radeon_cs_context_cleanup(struct radeon_cs_context *csc)
{
    unsigned i;

    for (i = 0; i < csc->num_relocs; i++) {
        p_atomic_dec(&csc->relocs_bo[i].bo->num_cs_references);
        radeon_bo_reference(&csc->relocs_bo[i].bo, NULL);
    }
    for (i = 0; i < csc->num_slab_buffers; ++i) {
        p_atomic_dec(&csc->slab_buffers[i].bo->num_cs_references);
        radeon_bo_reference(&csc->slab_buffers[i].bo, NULL);
    }

    csc->num_relocs = 0;
    csc->num_validated_relocs = 0;
    csc->num_slab_buffers = 0;
    csc->chunks[0].length_dw = 0;
    csc->chunks[1].length_dw = 0;

    for (i = 0; i < ARRAY_SIZE(csc->reloc_indices_hashlist); i++) {
        csc->reloc_indices_hashlist[i] = -1;
    }
}
Example #11
static void radeon_cs_context_cleanup(struct radeon_cs_context *csc)
{
    unsigned i;

    for (i = 0; i < csc->crelocs; i++) {
        p_atomic_dec(&csc->relocs_bo[i]->num_cs_references);
        radeon_bo_reference(&csc->relocs_bo[i], NULL);
    }

    csc->crelocs = 0;
    csc->validated_crelocs = 0;
    csc->chunks[0].length_dw = 0;
    csc->chunks[1].length_dw = 0;
    csc->used_gart = 0;
    csc->used_vram = 0;
    memset(csc->is_handle_added, 0, sizeof(csc->is_handle_added));
}
Example #12
static void amdgpu_cs_context_cleanup(struct amdgpu_cs *cs)
{
   unsigned i;

   for (i = 0; i < cs->num_buffers; i++) {
      p_atomic_dec(&cs->buffers[i].bo->num_cs_references);
      amdgpu_winsys_bo_reference(&cs->buffers[i].bo, NULL);
      cs->handles[i] = NULL;
      cs->flags[i] = 0;
   }

   cs->num_buffers = 0;
   cs->used_gart = 0;
   cs->used_vram = 0;

   for (i = 0; i < Elements(cs->buffer_indices_hashlist); i++) {
      cs->buffer_indices_hashlist[i] = -1;
   }
}
Example #13
static void radeon_drm_cs_destroy(struct radeon_winsys_cs *rcs)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    radeon_drm_cs_sync_flush(cs);
    if (cs->thread) {
        cs->kill_thread = 1;
        pipe_semaphore_signal(&cs->flush_queued);
        pipe_semaphore_wait(&cs->flush_completed);
        pipe_thread_wait(cs->thread);
    }
    pipe_semaphore_destroy(&cs->flush_queued);
    pipe_semaphore_destroy(&cs->flush_completed);
    radeon_cs_context_cleanup(&cs->csc1);
    radeon_cs_context_cleanup(&cs->csc2);
    p_atomic_dec(&cs->ws->num_cs);
    radeon_destroy_cs_context(&cs->csc1);
    radeon_destroy_cs_context(&cs->csc2);
    FREE(cs);
}
Example #14
static void radeon_cs_context_cleanup(struct radeon_cs_context *csc)
{
    unsigned i;

    for (i = 0; i < csc->crelocs; i++) {
        p_atomic_dec(&csc->relocs_bo[i]->num_cs_references);
        radeon_bo_reference(&csc->relocs_bo[i], NULL);
    }

    csc->crelocs = 0;
    csc->validated_crelocs = 0;
    csc->chunks[0].length_dw = 0;
    csc->chunks[1].length_dw = 0;
    csc->used_gart = 0;
    csc->used_vram = 0;

    for (i = 0; i < Elements(csc->reloc_indices_hashlist); i++) {
        csc->reloc_indices_hashlist[i] = -1;
    }
}
Example #15
static enum pipe_error
vmw_swc_flush(struct svga_winsys_context *swc,
              struct pipe_fence_handle **pfence)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct pipe_fence_handle *fence = NULL;
   unsigned i;
   enum pipe_error ret;

   ret = pb_validate_validate(vswc->validate);
   assert(ret == PIPE_OK);
   if(ret == PIPE_OK) {
   
      /* Apply relocations */
      for(i = 0; i < vswc->region.used; ++i) {
         struct vmw_buffer_relocation *reloc = &vswc->region.relocs[i];
         struct SVGAGuestPtr ptr;

         if(!vmw_gmr_bufmgr_region_ptr(reloc->buffer, &ptr))
            assert(0);

         ptr.offset += reloc->offset;

         if (reloc->is_mob) {
            if (reloc->mob.id)
               *reloc->mob.id = ptr.gmrId;
            if (reloc->mob.offset_into_mob)
               *reloc->mob.offset_into_mob = ptr.offset;
            else {
               assert(ptr.offset == 0);
            }
         } else
            *reloc->region.where = ptr;
      }

      if (vswc->command.used || pfence != NULL)
         vmw_ioctl_command(vswc->vws,
                           vswc->base.cid,
                           0,
                           vswc->command.buffer,
                           vswc->command.used,
                           &fence);

      pb_validate_fence(vswc->validate, fence);
   }

   vswc->command.used = 0;
   vswc->command.reserved = 0;

   for(i = 0; i < vswc->surface.used + vswc->surface.staged; ++i) {
      struct vmw_ctx_validate_item *isurf = &vswc->surface.items[i];
      if (isurf->referenced)
         p_atomic_dec(&isurf->vsurf->validated);
      vmw_svga_winsys_surface_reference(&isurf->vsurf, NULL);
   }

   util_hash_table_clear(vswc->hash);
   vswc->surface.used = 0;
   vswc->surface.reserved = 0;

   for(i = 0; i < vswc->shader.used + vswc->shader.staged; ++i) {
      struct vmw_ctx_validate_item *ishader = &vswc->shader.items[i];
      if (ishader->referenced)
         p_atomic_dec(&ishader->vshader->validated);
      vmw_svga_winsys_shader_reference(&ishader->vshader, NULL);
   }

   vswc->shader.used = 0;
   vswc->shader.reserved = 0;

   vswc->region.used = 0;
   vswc->region.reserved = 0;

#ifdef DEBUG
   vswc->must_flush = FALSE;
   debug_flush_flush(vswc->fctx);
#endif
   vswc->preemptive_flush = FALSE;
   vswc->seen_surfaces = 0;
   vswc->seen_regions = 0;
   vswc->seen_mobs = 0;

   if(pfence)
      vmw_fence_reference(vswc->vws, pfence, fence);

   vmw_fence_reference(vswc->vws, &fence, NULL);

   return ret;
}
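
Every example above follows the same pattern: an object added to a command stream carries an atomic counter (num_cs_references, num_active_ioctls, validated) that was incremented when the object was tracked, and the cleanup or flush path walks the tracked list, calls p_atomic_dec on each counter and then drops the reference. Below is a minimal, self-contained sketch of that pattern; tracked_bo, tracked_cs, cs_track and cs_untrack_all are illustrative names, not part of any real winsys.

#include <assert.h>
#include "util/u_atomic.h"   /* p_atomic_inc / p_atomic_dec */

#define MAX_TRACKED 64

/* Illustrative buffer object: the counter tells other threads how many
 * command streams currently reference it. */
struct tracked_bo {
   int num_cs_references;
};

struct tracked_cs {
   struct tracked_bo *bos[MAX_TRACKED];
   unsigned num_bos;
};

/* Adding a buffer to the CS bumps its counter atomically, so a flush or
 * eviction path running on another thread can see that the buffer is busy. */
static void
cs_track(struct tracked_cs *cs, struct tracked_bo *bo)
{
   assert(cs->num_bos < MAX_TRACKED);
   p_atomic_inc(&bo->num_cs_references);
   cs->bos[cs->num_bos++] = bo;
}

/* Cleanup mirrors the cleanup/destroy functions above: decrement each
 * counter before forgetting the buffer, then reset the list. */
static void
cs_untrack_all(struct tracked_cs *cs)
{
   unsigned i;

   for (i = 0; i < cs->num_bos; i++)
      p_atomic_dec(&cs->bos[i]->num_cs_references);
   cs->num_bos = 0;
}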