Example #1
VAStatus
vlVaDestroySurfaces(VADriverContextP ctx, VASurfaceID *surface_list, int num_surfaces)
{
   vlVaDriver *drv;
   int i;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   drv = VL_VA_DRIVER(ctx);
   pipe_mutex_lock(drv->mutex);
   for (i = 0; i < num_surfaces; ++i) {
      vlVaSurface *surf = handle_table_get(drv->htab, surface_list[i]);
      if (surf->buffer)
         surf->buffer->destroy(surf->buffer);
      util_dynarray_fini(&surf->subpics);
      FREE(surf);
      handle_table_remove(drv->htab, surface_list[i]);
   }
   pipe_mutex_unlock(drv->mutex);

   return VA_STATUS_SUCCESS;
}
Example #2
static void
rbug_set_constant_buffer(struct pipe_context *_pipe,
                         uint shader,
                         uint index,
                         struct pipe_constant_buffer *_cb)
{
   struct rbug_context *rb_pipe = rbug_context(_pipe);
   struct pipe_context *pipe = rb_pipe->pipe;
   struct pipe_constant_buffer cb;

   /* XXX hmm? unwrap the input state */
   if (_cb) {
      cb = *_cb;
      cb.buffer = rbug_resource_unwrap(_cb->buffer);
   }

   pipe_mutex_lock(rb_pipe->call_mutex);
   pipe->set_constant_buffer(pipe,
                             shader,
                             index,
                             _cb ? &cb : NULL);
   pipe_mutex_unlock(rb_pipe->call_mutex);
}
Example #3
void amdgpu_bo_destroy(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   int i;

   pipe_mutex_lock(bo->ws->global_bo_list_lock);
   LIST_DEL(&bo->global_list_item);
   bo->ws->num_buffers--;
   pipe_mutex_unlock(bo->ws->global_bo_list_lock);

   amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
   amdgpu_va_range_free(bo->va_handle);
   amdgpu_bo_free(bo->bo);

   for (i = 0; i < RING_LAST; i++)
      amdgpu_fence_reference(&bo->fence[i], NULL);

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      bo->ws->allocated_vram -= align64(bo->base.size, bo->ws->info.gart_page_size);
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      bo->ws->allocated_gtt -= align64(bo->base.size, bo->ws->info.gart_page_size);
   FREE(bo);
}
Example #4
static void
pb_debug_manager_dump(struct pb_debug_manager *mgr)
{
   struct list_head *curr, *next;
   struct pb_debug_buffer *buf;

   pipe_mutex_lock(mgr->mutex);
      
   curr = mgr->list.next;
   next = curr->next;
   while(curr != &mgr->list) {
      buf = LIST_ENTRY(struct pb_debug_buffer, curr, head);

      debug_printf("buffer = %p\n", buf);
      debug_printf("    .size = 0x%x\n", buf->base.base.size);
      debug_backtrace_dump(buf->create_backtrace, PB_DEBUG_CREATE_BACKTRACE);
      
      curr = next; 
      next = curr->next;
   }

   pipe_mutex_unlock(mgr->mutex);
}
Example #5
VAStatus
vlVaBufferInfo(VADriverContextP ctx, VABufferID buf_id, VABufferType *type,
               unsigned int *size, unsigned int *num_elements)
{
   vlVaDriver *drv;
   vlVaBuffer *buf;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   drv = VL_VA_DRIVER(ctx);
   pipe_mutex_lock(drv->mutex);
   buf = handle_table_get(drv->htab, buf_id);
   pipe_mutex_unlock(drv->mutex);
   if (!buf)
      return VA_STATUS_ERROR_INVALID_BUFFER;

   *type = buf->type;
   *size = buf->size;
   *num_elements = buf->num_elements;

   return VA_STATUS_SUCCESS;
}
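The VA-API entry points above (vlVaDestroySurfaces, vlVaBufferInfo) share one locking idiom: take the driver mutex only around the handle-table access, then validate the result after the lock is dropped. A minimal sketch of that idiom, assuming only the vlVaDriver and handle_table API shown above; vlVaGetBuffer is an invented helper name, not part of the driver:

static vlVaBuffer *
vlVaGetBuffer(vlVaDriver *drv, VABufferID buf_id)
{
   vlVaBuffer *buf;

   pipe_mutex_lock(drv->mutex);      /* the mutex only guards the handle table */
   buf = handle_table_get(drv->htab, buf_id);
   pipe_mutex_unlock(drv->mutex);    /* drop it before validating or using buf */

   return buf;                       /* caller still has to check for NULL */
}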
Example #6
/**
 * Have ptr reference fb.  The referenced framebuffer should be locked.
 */
void
stw_framebuffer_reference(
   struct stw_framebuffer **ptr,
   struct stw_framebuffer *fb)
{
   struct stw_framebuffer *old_fb = *ptr;

   if (old_fb == fb)
      return;

   if (fb)
      fb->refcnt++;
   if (old_fb) {
      pipe_mutex_lock(stw_dev->fb_mutex);

      pipe_mutex_lock(old_fb->mutex);
      stw_framebuffer_destroy_locked(old_fb);

      pipe_mutex_unlock(stw_dev->fb_mutex);
   }

   *ptr = fb;
}
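Passing NULL as the new framebuffer releases whatever *ptr currently points at; a hedged caller sketch based only on the function above (drop_cached_fb is an invented name):

/* Invented caller: clear a cached framebuffer pointer.  With fb == NULL the
 * function takes the device and old framebuffer mutexes, hands the old
 * framebuffer to stw_framebuffer_destroy_locked(), and sets *slot to NULL. */
static void
drop_cached_fb(struct stw_framebuffer **slot)
{
   stw_framebuffer_reference(slot, NULL);
}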
Example #7
/** Rasterize all scene's bins */
static void
lp_setup_rasterize_scene( struct lp_setup_context *setup )
{
   struct lp_scene *scene = setup->scene;
   struct llvmpipe_screen *screen = llvmpipe_screen(scene->pipe->screen);

   lp_scene_end_binning(scene);

   lp_fence_reference(&setup->last_fence, scene->fence);

   if (setup->last_fence)
      setup->last_fence->issued = TRUE;

   pipe_mutex_lock(screen->rast_mutex);
   lp_rast_queue_scene(screen->rast, scene);
   lp_rast_finish(screen->rast);
   pipe_mutex_unlock(screen->rast_mutex);

   lp_scene_end_rasterization(setup->scene);
   lp_setup_reset( setup );

   LP_DBG(DEBUG_SETUP, "%s done \n", __FUNCTION__);
}
Example #8
static void
rbug_set_sampler_views(struct pipe_context *_pipe,
                       unsigned shader,
                       unsigned start,
                       unsigned num,
                       struct pipe_sampler_view **_views)
{
    struct rbug_context *rb_pipe = rbug_context(_pipe);
    struct pipe_context *pipe = rb_pipe->pipe;
    struct pipe_sampler_view *unwrapped_views[PIPE_MAX_SHADER_SAMPLER_VIEWS];
    struct pipe_sampler_view **views = NULL;
    unsigned i;

    assert(start == 0); /* XXX fix */

    /* must protect curr status */
    pipe_mutex_lock(rb_pipe->call_mutex);

    rb_pipe->curr.num_views[shader] = 0;
    memset(rb_pipe->curr.views[shader], 0, sizeof(rb_pipe->curr.views[shader]));
    memset(rb_pipe->curr.texs[shader], 0, sizeof(rb_pipe->curr.texs[shader]));
    memset(unwrapped_views, 0, sizeof(unwrapped_views));

    if (_views) {
        rb_pipe->curr.num_views[shader] = num;
        for (i = 0; i < num; i++) {
            rb_pipe->curr.views[shader][i] = rbug_sampler_view(_views[i]);
            rb_pipe->curr.texs[shader][i] = rbug_resource(_views[i] ? _views[i]->texture : NULL);
            unwrapped_views[i] = rbug_sampler_view_unwrap(_views[i]);
        }
        views = unwrapped_views;
    }

    pipe->set_sampler_views(pipe, shader, start, num, views);

    pipe_mutex_unlock(rb_pipe->call_mutex);
}
Example #9
static void
svga_buffer_transfer_unmap( struct pipe_context *pipe,
                            struct pipe_transfer *transfer )
{
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_buffer *sbuf = svga_buffer(transfer->resource);
   
   pipe_mutex_lock(ss->swc_mutex);
   
   assert(sbuf->map.count);
   if (sbuf->map.count) {
      --sbuf->map.count;
   }

   if (sbuf->hwbuf) {
      sws->buffer_unmap(sws, sbuf->hwbuf);
   }

   if (transfer->usage & PIPE_TRANSFER_WRITE) {
      if (!(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
         /*
          * Mapped range not flushed explicitly, so flush the whole buffer,
          * and tell the host to discard the contents when processing the DMA
          * command.
          */

         SVGA_DBG(DEBUG_DMA, "flushing the whole buffer\n");
   
         sbuf->dma.flags.discard = TRUE;

         svga_buffer_add_range(sbuf, 0, sbuf->b.b.width0);
      }
   }

   pipe_mutex_unlock(ss->swc_mutex);
}
Example #10
/**
 * Return pointer to next bin to be rendered.
 * The lp_scene::curr_x and ::curr_y fields will be advanced.
 * Multiple rendering threads will call this function to get a chunk
 * of work (a bin) to work on.
 */
struct cmd_bin *
lp_scene_bin_iter_next( struct lp_scene *scene )
{
   struct cmd_bin *bin = NULL;

   pipe_mutex_lock(scene->mutex);

   if (scene->curr_x < 0) {
      /* first bin */
      scene->curr_x = 0;
      scene->curr_y = 0;
   }
   else if (!next_bin(scene)) {
      /* no more bins left */
      goto end;
   }

   bin = lp_scene_get_bin(scene, scene->curr_x, scene->curr_y);

end:
   /*printf("return bin %p at %d, %d\n", (void *) bin, *bin_x, *bin_y);*/
   pipe_mutex_unlock(scene->mutex);
   return bin;
}
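As the comment above says, several rasterizer threads call this iterator to grab work; a hedged sketch of such a per-thread consumer loop (rasterize_bin is an invented stand-in for whatever processes one bin):

/* Invented per-thread consumer: pull bins until the iterator runs dry. */
static void rasterize_bin(struct cmd_bin *bin);   /* hypothetical worker */

static void
rasterize_all_bins(struct lp_scene *scene)
{
   struct cmd_bin *bin;

   while ((bin = lp_scene_bin_iter_next(scene)) != NULL)
      rasterize_bin(bin);
}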
Example #11
static boolean debug_serial(void* p, unsigned* pserial)
{
   unsigned serial;
   boolean found = TRUE;
#ifdef PIPE_SUBSYSTEM_WINDOWS_USER
   static boolean first = TRUE;

   if (first) {
      pipe_mutex_init(serials_mutex);
      first = FALSE;
   }
#endif

   pipe_mutex_lock(serials_mutex);
   if(!serials_hash)
      serials_hash = util_hash_table_create(hash_ptr, compare_ptr);
   serial = (unsigned)(uintptr_t)util_hash_table_get(serials_hash, p);
   if(!serial)
   {
      /* time to stop logging... (you'll have a 100 GB logfile at least at this point)
       * TODO: avoid this
       */
      serial = ++serials_last;
      if(!serial)
      {
         debug_error("More than 2^32 objects detected, aborting.\n");
         os_abort();
      }

      util_hash_table_set(serials_hash, p, (void*)(uintptr_t)serial);
      found = FALSE;
   }
   pipe_mutex_unlock(serials_mutex);
   *pserial = serial;
   return found;
}
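debug_serial() hands back a stable per-pointer serial and returns TRUE only when the pointer has been seen before; a hedged sketch of a logging helper built on it (debug_log_object is an invented name):

/* Invented helper: log a stable object number instead of a raw pointer,
 * and call out the first time an object appears. */
static void
debug_log_object(void *obj)
{
   unsigned serial;

   if (!debug_serial(obj, &serial))
      debug_printf("new object #%u (%p)\n", serial, obj);
   else
      debug_printf("object #%u (%p)\n", serial, obj);
}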
Example #12
VAStatus
vlVaReleaseBufferHandle(VADriverContextP ctx, VABufferID buf_id)
{
   vlVaDriver *drv;
   vlVaBuffer *buf;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   drv = VL_VA_DRIVER(ctx);
   pipe_mutex_lock(drv->mutex);
   buf = handle_table_get(drv->htab, buf_id);
   pipe_mutex_unlock(drv->mutex);

   if (!buf)
      return VA_STATUS_ERROR_INVALID_BUFFER;

   if (buf->export_refcount == 0)
      return VA_STATUS_ERROR_INVALID_BUFFER;

   if (--buf->export_refcount == 0) {
      VABufferInfo * const buf_info = &buf->export_state;

      switch (buf_info->mem_type) {
      case VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME:
         close((intptr_t)buf_info->handle);
         break;
      default:
         return VA_STATUS_ERROR_INVALID_BUFFER;
      }

      buf_info->mem_type = 0;
   }

   return VA_STATUS_SUCCESS;
}
Example #13
void util_ringbuffer_enqueue( struct util_ringbuffer *ring,
                              const struct util_packet *packet )
{
   unsigned i;

   /* XXX: over-reliance on mutexes, etc:
    */
   pipe_mutex_lock(ring->mutex);

   /* make sure we don't request an impossible amount of space
    */
   assert(packet->dwords <= ring->mask);

   /* Wait for free space:
    */
   while (util_ringbuffer_space(ring) < packet->dwords)
      pipe_condvar_wait(ring->change, ring->mutex);

   /* Copy data to ring:
    */
   for (i = 0; i < packet->dwords; i++) {

      /* Copy all dwords of the packet.  Note we're abusing the
       * typesystem a little - we're being passed a pointer to
       * something, but probably not an array of packet structs:
       */
      ring->buf[ring->head] = packet[i];
      ring->head++;
      ring->head &= ring->mask;
   }

   /* Signal change:
    */
   pipe_condvar_signal(ring->change);
   pipe_mutex_unlock(ring->mutex);
}
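The copy loop treats the packet as the first dword of a larger message whose total length, in dwords, is packet->dwords. A hedged caller sketch, assuming struct util_packet is one dword wide and exposes the dwords field used above; struct my_msg and send_my_msg are invented for illustration:

/* Invented message type: a util_packet header followed by three payload
 * dwords.  The header's dwords field must count the whole message. */
struct my_msg {
   struct util_packet header;
   uint32_t payload[3];
};

static void
send_my_msg(struct util_ringbuffer *ring, uint32_t a, uint32_t b, uint32_t c)
{
   struct my_msg msg;

   memset(&msg, 0, sizeof(msg));
   msg.header.dwords = sizeof(msg) / 4;   /* total size in dwords */
   msg.payload[0] = a;
   msg.payload[1] = b;
   msg.payload[2] = c;

   /* Blocks on ring->change until there is room, then copies and signals. */
   util_ringbuffer_enqueue(ring, &msg.header);
}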
Example #14
void *
rtasm_exec_malloc(size_t size)
{
   struct mem_block *block = NULL;
   void *addr = NULL;

   pipe_mutex_lock(exec_mutex);

   init_heap();

   if (exec_heap) {
      size = (size + 31) & ~31;  /* next multiple of 32 bytes */
      block = u_mmAllocMem( exec_heap, size, 5, 0 ); /* 5 -> 32-byte alignment */
   }

   if (block)
      addr = exec_mem + block->ofs;
   else 
      debug_printf("rtasm_exec_malloc failed\n");
   
   pipe_mutex_unlock(exec_mutex);
   
   return addr;
}
Example #15
void
stw_framebuffer_cleanup( void )
{
   struct stw_framebuffer *fb;
   struct stw_framebuffer *next;

   if (!stw_dev)
      return;

   pipe_mutex_lock( stw_dev->fb_mutex );

   fb = stw_dev->fb_head;
   while (fb) {
      next = fb->next;
      
      pipe_mutex_lock(fb->mutex);
      stw_framebuffer_destroy_locked(fb);
      
      fb = next;
   }
   stw_dev->fb_head = NULL;
   
   pipe_mutex_unlock( stw_dev->fb_mutex );
}
Example #16
BOOL APIENTRY
DrvShareLists(
    DHGLRC dhglrc1,
    DHGLRC dhglrc2 )
{
    struct stw_context *ctx1;
    struct stw_context *ctx2;
    BOOL ret = FALSE;

    if (!stw_dev)
        return FALSE;

    pipe_mutex_lock( stw_dev->ctx_mutex );

    ctx1 = stw_lookup_context_locked( dhglrc1 );
    ctx2 = stw_lookup_context_locked( dhglrc2 );

    if (ctx1 && ctx2 && ctx2->st->share)
        ret = ctx2->st->share(ctx2->st, ctx1->st);

    pipe_mutex_unlock( stw_dev->ctx_mutex );

    return ret;
}
Example #17
static void
fenced_buffer_fence(struct pb_buffer *buf,
                    struct pipe_fence_handle *fence)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(pipe_is_referenced(&fenced_buf->base.base.reference));
   assert(fenced_buf->buffer);

   if(fence != fenced_buf->fence) {
      assert(fenced_buf->vl);
      assert(fenced_buf->validation_flags);

      if (fenced_buf->fence) {
         boolean destroyed;
         destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
         assert(!destroyed);
      }
      if (fence) {
         ops->fence_reference(ops, &fenced_buf->fence, fence);
         fenced_buf->flags |= fenced_buf->validation_flags;
         fenced_buffer_add_locked(fenced_mgr, fenced_buf);
      }

      pb_fence(fenced_buf->buffer, fence);

      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}
Example #18
static boolean virgl_drm_winsys_resource_get_handle(struct virgl_winsys *qws,
                                                    struct virgl_hw_res *res,
                                                    uint32_t stride,
                                                    struct winsys_handle *whandle)
 {
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_gem_flink flink;

   if (!res)
       return FALSE;

   if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
      if (!res->flinked) {
         memset(&flink, 0, sizeof(flink));
         flink.handle = res->bo_handle;

         if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
            return FALSE;
         }
         res->flinked = TRUE;
         res->flink = flink.name;

         pipe_mutex_lock(qdws->bo_handles_mutex);
         util_hash_table_set(qdws->bo_names, (void *)(uintptr_t)res->flink, res);
         pipe_mutex_unlock(qdws->bo_handles_mutex);
      }
      whandle->handle = res->flink;
   } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
      whandle->handle = res->bo_handle;
   } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
      if (drmPrimeHandleToFD(qdws->fd, res->bo_handle, DRM_CLOEXEC, (int*)&whandle->handle))
            return FALSE;
   }
   whandle->stride = stride;
   return TRUE;
}
Example #19
bool
hsp_copy_context(uint64 srcCtxId, uint64 dstCtxId, uint mask)
{
	TRACE("%s(src: %d dst: %d mask: %d)\n", __FUNCTION__,
		srcCtxId, dstCtxId, mask);
	struct hsp_context *src;
	struct hsp_context *dst;
	bool ret = false;

	pipe_mutex_lock(hsp_dev->mutex);

	src = hsp_lookup_context(srcCtxId);
	dst = hsp_lookup_context(dstCtxId);

	if (src && dst) {
		#warning Implement copying context!
		(void)src;
		(void)dst;
		(void)mask;
	}

	pipe_mutex_unlock(hsp_dev->mutex);
	return ret;
}
Example #20
PUBLIC struct radeon_winsys *
radeon_drm_winsys_create(int fd, radeon_screen_create_t screen_create)
{
    struct radeon_drm_winsys *ws;

    pipe_mutex_lock(fd_tab_mutex);
    if (!fd_tab) {
        fd_tab = util_hash_table_create(hash_fd, compare_fd);
    }

    ws = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
    if (ws) {
        pipe_reference(NULL, &ws->reference);
        pipe_mutex_unlock(fd_tab_mutex);
        return &ws->base;
    }

    ws = CALLOC_STRUCT(radeon_drm_winsys);
    if (!ws) {
        pipe_mutex_unlock(fd_tab_mutex);
        return NULL;
    }

    ws->fd = dup(fd);

    if (!do_winsys_init(ws))
        goto fail;

    /* Create managers. */
    ws->kman = radeon_bomgr_create(ws);
    if (!ws->kman)
        goto fail;

    ws->cman = pb_cache_manager_create(ws->kman, 500000, 2.0f, 0,
                                       MIN2(ws->info.vram_size, ws->info.gart_size));
    if (!ws->cman)
        goto fail;

    if (ws->gen >= DRV_R600) {
        ws->surf_man = radeon_surface_manager_new(ws->fd);
        if (!ws->surf_man)
            goto fail;
    }

    /* init reference */
    pipe_reference_init(&ws->reference, 1);

    /* Set functions. */
    ws->base.unref = radeon_winsys_unref;
    ws->base.destroy = radeon_winsys_destroy;
    ws->base.query_info = radeon_query_info;
    ws->base.cs_request_feature = radeon_cs_request_feature;
    ws->base.query_value = radeon_query_value;
    ws->base.read_registers = radeon_read_registers;

    radeon_bomgr_init_functions(ws);
    radeon_drm_cs_init_functions(ws);
    radeon_surface_init_functions(ws);

    pipe_mutex_init(ws->hyperz_owner_mutex);
    pipe_mutex_init(ws->cmask_owner_mutex);
    pipe_mutex_init(ws->cs_stack_lock);

    ws->ncs = 0;
    pipe_semaphore_init(&ws->cs_queued, 0);
    if (ws->num_cpus > 1 && debug_get_option_thread())
        ws->thread = pipe_thread_create(radeon_drm_cs_emit_ioctl, ws);

    /* Create the screen at the end. The winsys must be initialized
     * completely.
     *
     * Alternatively, we could create the screen based on "ws->gen"
     * and link all drivers into one binary blob. */
    ws->base.screen = screen_create(&ws->base);
    if (!ws->base.screen) {
        radeon_winsys_destroy(&ws->base);
        pipe_mutex_unlock(fd_tab_mutex);
        return NULL;
    }

    util_hash_table_set(fd_tab, intptr_to_pointer(ws->fd), ws);

    /* We must unlock the mutex once the winsys is fully initialized, so that
     * other threads attempting to create the winsys from the same fd will
     * get a fully initialized winsys and not just half-way initialized. */
    pipe_mutex_unlock(fd_tab_mutex);

    return &ws->base;

fail:
    pipe_mutex_unlock(fd_tab_mutex);
    if (ws->cman)
        ws->cman->destroy(ws->cman);
    if (ws->kman)
        ws->kman->destroy(ws->kman);
    if (ws->surf_man)
        radeon_surface_manager_free(ws->surf_man);
    if (ws->fd >= 0)
        close(ws->fd);

    FREE(ws);
    return NULL;
}
Example #21
void trace_dump_call_end(void)
{
   trace_dump_call_end_locked();
   pipe_mutex_unlock(call_mutex);
}
Example #22
void trace_dumping_stop(void)
{
   pipe_mutex_lock(call_mutex);
   trace_dumping_stop_locked();
   pipe_mutex_unlock(call_mutex);
}
Example #23
void trace_dump_call_unlock(void)
{
   pipe_mutex_unlock(call_mutex);
}
Example #24
static struct pb_buffer *
pb_debug_manager_create_buffer(struct pb_manager *_mgr, 
                               pb_size size,
                               const struct pb_desc *desc)
{
   struct pb_debug_manager *mgr = pb_debug_manager(_mgr);
   struct pb_debug_buffer *buf;
   struct pb_desc real_desc;
   pb_size real_size;
   
   assert(size);
   assert(desc->alignment);

   buf = CALLOC_STRUCT(pb_debug_buffer);
   if(!buf)
      return NULL;
   
   real_size = mgr->underflow_size + size + mgr->overflow_size;
   real_desc = *desc;
   real_desc.usage |= PIPE_BUFFER_USAGE_CPU_WRITE;
   real_desc.usage |= PIPE_BUFFER_USAGE_CPU_READ;

   buf->buffer = mgr->provider->create_buffer(mgr->provider, 
                                              real_size, 
                                              &real_desc);
   if(!buf->buffer) {
      FREE(buf);
#if 0
      pipe_mutex_lock(mgr->mutex);
      debug_printf("%s: failed to create buffer\n", __FUNCTION__);
      if(!LIST_IS_EMPTY(&mgr->list))
         pb_debug_manager_dump(mgr);
      pipe_mutex_unlock(mgr->mutex);
#endif
      return NULL;
   }
   
   assert(pipe_is_referenced(&buf->buffer->base.reference));
   assert(pb_check_alignment(real_desc.alignment, buf->buffer->base.alignment));
   assert(pb_check_usage(real_desc.usage, buf->buffer->base.usage));
   assert(buf->buffer->base.size >= real_size);
   
   pipe_reference_init(&buf->base.base.reference, 1);
   buf->base.base.alignment = desc->alignment;
   buf->base.base.usage = desc->usage;
   buf->base.base.size = size;
   
   buf->base.vtbl = &pb_debug_buffer_vtbl;
   buf->mgr = mgr;

   buf->underflow_size = mgr->underflow_size;
   buf->overflow_size = buf->buffer->base.size - buf->underflow_size - size;
   
   debug_backtrace_capture(buf->create_backtrace, 1, PB_DEBUG_CREATE_BACKTRACE);

   pb_debug_buffer_fill(buf);
   
   pipe_mutex_init(buf->mutex);
   
   pipe_mutex_lock(mgr->mutex);
   LIST_ADDTAIL(&buf->head, &mgr->list);
   pipe_mutex_unlock(mgr->mutex);

   return &buf->base;
}
Example #25
static void radeon_drm_cs_flush(struct radeon_winsys_cs *rcs, unsigned flags, uint32_t cs_trace_id)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct radeon_cs_context *tmp;

    switch (cs->base.ring_type) {
    case RING_DMA:
        /* pad DMA ring to 8 DWs */
        if (cs->ws->info.chip_class <= SI) {
            while (rcs->cdw & 7)
                OUT_CS(&cs->base, 0xf0000000); /* NOP packet */
        } else {
            while (rcs->cdw & 7)
                OUT_CS(&cs->base, 0x00000000); /* NOP packet */
        }
        break;
    case RING_GFX:
        /* pad GFX ring to 8 DWs to meet CP fetch alignment requirements;
         * r6xx requires at least 4 dw alignment to avoid a hw bug.
         */
        if (flags & RADEON_FLUSH_COMPUTE) {
            if (cs->ws->info.chip_class <= SI) {
                while (rcs->cdw & 7)
                    OUT_CS(&cs->base, 0x80000000); /* type2 nop packet */
            } else {
                while (rcs->cdw & 7)
                    OUT_CS(&cs->base, 0xffff1000); /* type3 nop packet */
            }
        } else {
            while (rcs->cdw & 7)
                OUT_CS(&cs->base, 0x80000000); /* type2 nop packet */
        }
        break;
    }

    if (rcs->cdw > RADEON_MAX_CMDBUF_DWORDS) {
        fprintf(stderr, "radeon: command stream overflowed\n");
    }

    radeon_drm_cs_sync_flush(rcs);

    /* Flip command streams. */
    tmp = cs->csc;
    cs->csc = cs->cst;
    cs->cst = tmp;

    cs->cst->cs_trace_id = cs_trace_id;

    /* If the CS is not empty or overflowed, emit it in a separate thread. */
    if (cs->base.cdw && cs->base.cdw <= RADEON_MAX_CMDBUF_DWORDS && !debug_get_option_noop()) {
        unsigned i, crelocs = cs->cst->crelocs;

        cs->cst->chunks[0].length_dw = cs->base.cdw;

        for (i = 0; i < crelocs; i++) {
            /* Update the number of active asynchronous CS ioctls for the buffer. */
            p_atomic_inc(&cs->cst->relocs_bo[i]->num_active_ioctls);
        }

        switch (cs->base.ring_type) {
        case RING_DMA:
            cs->cst->flags[0] = 0;
            cs->cst->flags[1] = RADEON_CS_RING_DMA;
            cs->cst->cs.num_chunks = 3;
            if (cs->ws->info.r600_virtual_address) {
                cs->cst->flags[0] |= RADEON_CS_USE_VM;
            }
            break;

        case RING_UVD:
            cs->cst->flags[0] = 0;
            cs->cst->flags[1] = RADEON_CS_RING_UVD;
            cs->cst->cs.num_chunks = 3;
            break;

        default:
        case RING_GFX:
            cs->cst->flags[0] = 0;
            cs->cst->flags[1] = RADEON_CS_RING_GFX;
            cs->cst->cs.num_chunks = 2;
            if (flags & RADEON_FLUSH_KEEP_TILING_FLAGS) {
                cs->cst->flags[0] |= RADEON_CS_KEEP_TILING_FLAGS;
                cs->cst->cs.num_chunks = 3;
            }
            if (cs->ws->info.r600_virtual_address) {
                cs->cst->flags[0] |= RADEON_CS_USE_VM;
                cs->cst->cs.num_chunks = 3;
            }
            if (flags & RADEON_FLUSH_END_OF_FRAME) {
                cs->cst->flags[0] |= RADEON_CS_END_OF_FRAME;
                cs->cst->cs.num_chunks = 3;
            }
            if (flags & RADEON_FLUSH_COMPUTE) {
                cs->cst->flags[1] = RADEON_CS_RING_COMPUTE;
                cs->cst->cs.num_chunks = 3;
            }
            break;
        }

        if (cs->ws->thread && (flags & RADEON_FLUSH_ASYNC)) {
            cs->flush_started = 1;
            radeon_drm_ws_queue_cs(cs->ws, cs);
        } else {
            pipe_mutex_lock(cs->ws->cs_stack_lock);
            if (cs->ws->thread) {
                while (p_atomic_read(&cs->ws->ncs)) {
                    pipe_condvar_wait(cs->ws->cs_queue_empty, cs->ws->cs_stack_lock);
                }
            }
            pipe_mutex_unlock(cs->ws->cs_stack_lock);
            radeon_drm_cs_emit_ioctl_oneshot(cs, cs->cst);
        }
    } else {
        radeon_cs_context_cleanup(cs->cst);
    }

    /* Prepare a new CS. */
    cs->base.buf = cs->csc->buf;
    cs->base.cdw = 0;
}
Example #26
static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
                                                           struct winsys_handle *whandle,
                                                           unsigned *stride,
                                                           unsigned *size)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo *bo;
    struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
    struct drm_gem_open open_arg = {};

    /* We must maintain a list of pairs <handle, bo>, so that we always return
     * the same BO for one particular handle. If we didn't do that and created
     * more than one BO for the same handle and then relocated them in a CS,
     * we would hit a deadlock in the kernel.
     *
     * The list of pairs is guarded by a mutex, of course. */
    pipe_mutex_lock(mgr->bo_handles_mutex);

    /* First check if there already is an existing bo for the handle. */
    bo = util_hash_table_get(mgr->bo_handles, (void*)(uintptr_t)whandle->handle);
    if (bo) {
        /* Increase the refcount. */
        struct pb_buffer *b = NULL;
        pb_reference(&b, &bo->base);
        goto done;
    }

    /* There isn't, create a new one. */
    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo) {
        goto fail;
    }

    /* Open the BO. */
    open_arg.name = whandle->handle;
    if (drmIoctl(ws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
        FREE(bo);
        goto fail;
    }
    bo->handle = open_arg.handle;
    bo->size = open_arg.size;
    bo->name = whandle->handle;

    /* Initialize it. */
    pipe_reference_init(&bo->base.base.reference, 1);
    bo->base.base.alignment = 0;
    bo->base.base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
    bo->base.base.size = bo->size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    pipe_mutex_init(bo->map_mutex);

    util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)whandle->handle, bo);

done:
    pipe_mutex_unlock(mgr->bo_handles_mutex);

    if (stride)
        *stride = whandle->stride;
    if (size)
        *size = bo->base.base.size;

    return (struct pb_buffer*)bo;

fail:
    pipe_mutex_unlock(mgr->bo_handles_mutex);
    return NULL;
}
Example #27
static void *radeon_bo_map_internal(struct pb_buffer *_buf,
                                    unsigned flags, void *flush_ctx)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct radeon_drm_cs *cs = flush_ctx;
    struct drm_radeon_gem_mmap args = {};
    void *ptr;

    /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
    if (!(flags & PB_USAGE_UNSYNCHRONIZED)) {
        /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
        if (flags & PB_USAGE_DONTBLOCK) {
            if (!(flags & PB_USAGE_CPU_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC);
                    return NULL;
                }

                if (radeon_bo_is_busy((struct pb_buffer*)bo,
                                      RADEON_USAGE_WRITE)) {
                    return NULL;
                }
            } else {
                if (radeon_bo_is_referenced_by_cs(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC);
                    return NULL;
                }

                if (radeon_bo_is_busy((struct pb_buffer*)bo,
                                      RADEON_USAGE_READWRITE)) {
                    return NULL;
                }
            }
        } else {
            if (!(flags & PB_USAGE_CPU_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, 0);
                }
                radeon_bo_wait((struct pb_buffer*)bo,
                               RADEON_USAGE_WRITE);
            } else {
                /* Mapping for write. */
                if (radeon_bo_is_referenced_by_cs(cs, bo)) {
                    cs->flush_cs(cs->flush_data, 0);
                } else {
                    /* Try to avoid busy-waiting in radeon_bo_wait. */
                    if (p_atomic_read(&bo->num_active_ioctls))
                        radeon_drm_cs_sync_flush(cs);
                }

                radeon_bo_wait((struct pb_buffer*)bo, RADEON_USAGE_READWRITE);
            }
        }
    }

    /* Return the pointer if it's already mapped. */
    if (bo->ptr)
        return bo->ptr;

    /* Map the buffer. */
    pipe_mutex_lock(bo->map_mutex);
    /* Return the pointer if it's already mapped (in case of a race). */
    if (bo->ptr) {
        pipe_mutex_unlock(bo->map_mutex);
        return bo->ptr;
    }
    args.handle = bo->handle;
    args.offset = 0;
    args.size = (uint64_t)bo->size;
    if (drmCommandWriteRead(bo->rws->fd,
                            DRM_RADEON_GEM_MMAP,
                            &args,
                            sizeof(args))) {
        pipe_mutex_unlock(bo->map_mutex);
        fprintf(stderr, "radeon: gem_mmap failed: %p 0x%08X\n",
                bo, bo->handle);
        return NULL;
    }

    ptr = mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
               bo->rws->fd, args.addr_ptr);
    if (ptr == MAP_FAILED) {
        pipe_mutex_unlock(bo->map_mutex);
        fprintf(stderr, "radeon: mmap failed, errno: %i\n", errno);
        return NULL;
    }
    bo->ptr = ptr;
    pipe_mutex_unlock(bo->map_mutex);

    return bo->ptr;
}
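The tail of radeon_bo_map_internal() is a double-checked lazy mapping: test bo->ptr without the lock, take map_mutex, re-check, do the expensive mmap only once, publish the pointer, unlock. The same shape in a self-contained sketch with generic names and pthreads instead of the pipe_mutex wrappers (like the original, it relies on plain pointer loads/stores being adequate on the target platform):

#include <pthread.h>

struct lazy_mapping {
   pthread_mutex_t mutex;
   void *ptr;                        /* NULL until mapped */
};

/* Create the mapping exactly once even when several threads race here;
 * later callers just reuse the published pointer. */
static void *
lazy_map(struct lazy_mapping *m, void *(*do_map)(void *arg), void *arg)
{
   void *ptr = m->ptr;
   if (ptr)                          /* fast path: already mapped */
      return ptr;

   pthread_mutex_lock(&m->mutex);
   if (!m->ptr)                      /* re-check: another thread may have won */
      m->ptr = do_map(arg);
   ptr = m->ptr;
   pthread_mutex_unlock(&m->mutex);
   return ptr;
}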
Example #28
VAStatus
vlVaCreateContext(VADriverContextP ctx, VAConfigID config_id, int picture_width,
                  int picture_height, int flag, VASurfaceID *render_targets,
                  int num_render_targets, VAContextID *context_id)
{
   vlVaDriver *drv;
   vlVaContext *context;
   vlVaConfig *config;
   int is_vpp;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   drv = VL_VA_DRIVER(ctx);
   pipe_mutex_lock(drv->mutex);
   config = handle_table_get(drv->htab, config_id);
   pipe_mutex_unlock(drv->mutex);

   is_vpp = config->profile == PIPE_VIDEO_PROFILE_UNKNOWN && !picture_width &&
            !picture_height && !flag && !render_targets && !num_render_targets;

   if (!(picture_width && picture_height) && !is_vpp)
      return VA_STATUS_ERROR_INVALID_IMAGE_FORMAT;

   context = CALLOC(1, sizeof(vlVaContext));
   if (!context)
      return VA_STATUS_ERROR_ALLOCATION_FAILED;

   if (is_vpp) {
      context->decoder = NULL;
   } else {
      context->templat.profile = config->profile;
      context->templat.entrypoint = config->entrypoint;
      context->templat.chroma_format = PIPE_VIDEO_CHROMA_FORMAT_420;
      context->templat.width = picture_width;
      context->templat.height = picture_height;
      context->templat.expect_chunked_decode = true;

      switch (u_reduce_video_profile(context->templat.profile)) {
      case PIPE_VIDEO_FORMAT_MPEG12:
      case PIPE_VIDEO_FORMAT_VC1:
      case PIPE_VIDEO_FORMAT_MPEG4:
         context->templat.max_references = 2;
         break;

      case PIPE_VIDEO_FORMAT_MPEG4_AVC:
         context->templat.max_references = 0;
         if (config->entrypoint != PIPE_VIDEO_ENTRYPOINT_ENCODE) {
            context->desc.h264.pps = CALLOC_STRUCT(pipe_h264_pps);
            if (!context->desc.h264.pps) {
               FREE(context);
               return VA_STATUS_ERROR_ALLOCATION_FAILED;
            }
            context->desc.h264.pps->sps = CALLOC_STRUCT(pipe_h264_sps);
            if (!context->desc.h264.pps->sps) {
               FREE(context->desc.h264.pps);
               FREE(context);
               return VA_STATUS_ERROR_ALLOCATION_FAILED;
            }
         }
         break;

     case PIPE_VIDEO_FORMAT_HEVC:
         context->templat.max_references = num_render_targets;
         context->desc.h265.pps = CALLOC_STRUCT(pipe_h265_pps);
         if (!context->desc.h265.pps) {
            FREE(context);
            return VA_STATUS_ERROR_ALLOCATION_FAILED;
         }
         context->desc.h265.pps->sps = CALLOC_STRUCT(pipe_h265_sps);
         if (!context->desc.h265.pps->sps) {
            FREE(context->desc.h265.pps);
            FREE(context);
            return VA_STATUS_ERROR_ALLOCATION_FAILED;
         }
         break;

      default:
         break;
      }
   }

   context->desc.base.profile = config->profile;
   context->desc.base.entry_point = config->entrypoint;
   if (config->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE)
      context->desc.h264enc.rate_ctrl.rate_ctrl_method = config->rc;

   pipe_mutex_lock(drv->mutex);
   *context_id = handle_table_add(drv->htab, context);
   pipe_mutex_unlock(drv->mutex);

   return VA_STATUS_SUCCESS;
}
Example #29
uint64
hsp_create_layer_context(Bitmap *bitmap, int layerPlane)
{
	TRACE("%s(bitmap: %p layerPlane: %d)\n", __FUNCTION__,
		bitmap, layerPlane);
	time_t total_beg, total_end;
	time_t beg, end;
	total_beg = time(NULL);
	struct hsp_context *ctx = NULL;
	struct pipe_screen *screen = NULL;
	GLvisual *visual = NULL;
	struct pipe_context *pipe = NULL;
	uint ctxId = 0;

	if (!hsp_dev) {
		TRACE("%s> there is no hsp_dev!\n", __FUNCTION__);
		return 0;
	}

	if (layerPlane != 0) {
		TRACE("%s> layerPlane != 0\n", __FUNCTION__);
		return 0;
	}

	ctx = CALLOC_STRUCT(hsp_context);
	if (!ctx) {
		TRACE("%s> can't alloc hsp_context!\n", __FUNCTION__);
		goto no_ctx;
	}

	ctx->bitmap = bitmap;
	ctx->colorSpace = get_bitmap_color_space(bitmap);
	ctx->draw = NULL;
	ctx->read = NULL;

	screen = hsp_dev->screen;

#ifdef DEBUG
    /* Unwrap screen */
    if (hsp_dev->trace_running)
        screen = trace_screen(screen)->screen;
#endif

	ulong options = hsp_dev->options;

	const GLboolean rgbFlag		= ((options & BGL_INDEX) == 0);
	const GLboolean alphaFlag	= ((options & BGL_ALPHA) == BGL_ALPHA);
	const GLboolean dblFlag		= ((options & BGL_DOUBLE) == BGL_DOUBLE);
	const GLboolean stereoFlag	= false;
	const GLint depth		= (options & BGL_DEPTH) ? 24 : 0;
	const GLint stencil		= (options & BGL_STENCIL) ? 8 : 0;
	const GLint accum		= 0;					// (options & BGL_ACCUM) ? 16 : 0;
	const GLint index		= 0;					// (options & BGL_INDEX) ? 32 : 0;
	const GLint red			= rgbFlag ? 8 : 0;
	const GLint green		= rgbFlag ? 8 : 0;
	const GLint blue		= rgbFlag ? 8 : 0;
	const GLint alpha		= alphaFlag ? 8 : 0;

	TRACE("rgb         :\t%d\n", (bool)rgbFlag);
	TRACE("alpha       :\t%d\n", (bool)alphaFlag);
	TRACE("dbl         :\t%d\n", (bool)dblFlag);
	TRACE("stereo      :\t%d\n", (bool)stereoFlag);
	TRACE("depth       :\t%d\n", depth);
	TRACE("stencil     :\t%d\n", stencil);
	TRACE("accum       :\t%d\n", accum);
	TRACE("index       :\t%d\n", index);
	TRACE("red         :\t%d\n", red);
	TRACE("green       :\t%d\n", green);
	TRACE("blue        :\t%d\n", blue);
	TRACE("alpha       :\t%d\n", alpha);

	visual = _mesa_create_visual(
		rgbFlag, dblFlag, stereoFlag, red, green, blue, alpha, index, depth,
		stencil, accum, accum, accum, alpha ? accum : 0, 1);

	TRACE("depthBits   :\t%d\n", visual->depthBits);
	TRACE("stencilBits :\t%d\n", visual->stencilBits);

	if (!visual) {
		TRACE("%s> can't create mesa visual!\n", __FUNCTION__);
		goto no_ctx_id;
	}

	pipe = hsp_dev->hsp_winsys->create_context(screen);

	if (!pipe) {
		TRACE("%s> can't create pipe!\n", __FUNCTION__);
		goto no_pipe;
	}

#ifdef DEBUG
    if (hsp_dev->trace_running)
        pipe = trace_context_create(hsp_dev->screen, pipe);
#endif

	assert(!pipe->priv);
	pipe->priv = bitmap;

	ctx->st = st_create_context(pipe, visual, NULL);
	if (!ctx->st) {
		TRACE("%s> can't create mesa statetracker context!\n",
			__FUNCTION__);
		goto no_st_ctx;
	}

	ctx->st->ctx->DriverCtx = ctx;

	pipe_mutex_lock(hsp_dev->mutex);
	uint64 i;
	for (i = 0; i < HSP_CONTEXT_MAX; i++) {
		if (hsp_dev->ctx_array[i].ctx == NULL) {
			hsp_dev->ctx_array[i].ctx = ctx;
			ctxId = i + 1;
			break;
		}
	}
	pipe_mutex_unlock(hsp_dev->mutex);

	if (ctxId != 0)
		return ctxId;

no_ctx_id:
	TRACE("%s> no_ctx_id!\n", __FUNCTION__);
    _mesa_destroy_visual(visual);
    st_destroy_context(ctx->st);
    goto no_pipe; /* st_context_destroy already destroy pipe */
no_st_ctx:
	TRACE("%s> no_st_ctx!\n", __FUNCTION__);
    pipe->destroy(pipe);
no_pipe:
	TRACE("%s> no_pipe!\n", __FUNCTION__);
    FREE(ctx);
no_ctx:
	TRACE("%s> no_ctx!\n", __FUNCTION__);
    return 0;
}
Example #30
bool
hsp_make_current(Bitmap *bitmap, uint64 ctxId)
{
	TRACE("%s(bitmap: %p ctxId: %d)\n", __FUNCTION__, bitmap, ctxId);
	struct hsp_context *ctx = NULL;
	GET_CURRENT_CONTEXT(glcurctx);
	GLuint width = 0;
	GLuint height = 0;
	struct hsp_context *curctx;

	if (!hsp_dev) {
		TRACE("%s> there's no hsp_dev so nothing to do\n",
			__FUNCTION__);
		return false;
	}

	pipe_mutex_lock(hsp_dev->mutex);
	ctx = hsp_lookup_context(ctxId);
	pipe_mutex_unlock(hsp_dev->mutex);

	if (ctx == NULL) {
		TRACE("%s> context not found\n", __FUNCTION__);
		return false;
	}

	current_bitmap = bitmap;
	current_ctx_id = ctxId;

	if (glcurctx != NULL) {
		curctx = (struct hsp_context*) glcurctx->DriverCtx;

		if (curctx != ctx)
			st_flush(glcurctx->st, PIPE_FLUSH_RENDER_CACHE, NULL);
	}

	if (!bitmap || ctxId == 0) {
		st_make_current(NULL, NULL, NULL);
		return true;
	}

	if (glcurctx != NULL) {
		struct hsp_context *curctx = (struct hsp_context*) glcurctx->DriverCtx;
		if (curctx != NULL && curctx == ctx && ctx->bitmap == bitmap)
			return true;
	}

	if (bitmap != NULL) {
		get_bitmap_size(bitmap, &width, &height);
		TRACE("%s> found bitmap: %p with size: %dx%d\n", __FUNCTION__,
			bitmap, width, height);
	}

	if (ctx != NULL && bitmap != NULL ) {
		GLvisual *visual = &ctx->st->ctx->Visual;
		if (ctx->draw == NULL) {
			ctx->draw = framebuffer_create(bitmap, visual, width /*+ 1*/, height /*+ 1*/);
		}
		if ((hsp_dev->options & BGL_DOUBLE) == BGL_DOUBLE && ctx->read == NULL) {
			ctx->read = framebuffer_create(bitmap, visual, width /*+ 1*/, height /*+ 1*/);
		}
	}

	if (ctx) {
		if (ctx->draw && ctx->read == NULL) {
			st_make_current(ctx->st, ctx->draw->stfb, ctx->draw->stfb);
			framebuffer_resize(ctx->draw, width /*+ 1*/, height /*+ 1*/);
		} else if (ctx && ctx->draw && ctx->read) {
			st_make_current(ctx->st, ctx->draw->stfb, ctx->read->stfb);
			framebuffer_resize(ctx->draw, width /*+ 1*/, height /*+ 1*/);
			framebuffer_resize(ctx->read, width /*+ 1*/, height /*+ 1*/);
		}
		ctx->bitmap = bitmap;
		ctx->st->pipe->priv = bitmap;
	} else {
		st_make_current(NULL, NULL, NULL);
	}

	return true;
}