Example 1
void
ilo_render_destroy(struct ilo_render *render)
{
   intel_bo_unref(render->vs_scratch.bo);
   intel_bo_unref(render->gs_scratch.bo);
   intel_bo_unref(render->fs_scratch.bo);

   intel_bo_unref(render->workaround_bo);
   FREE(render);
}
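Example 1 unrefs every buffer unconditionally, including scratch bos that are only allocated on demand (see Example 8), which suggests intel_bo_unref() is a no-op on NULL. A minimal sketch of that convention, assuming a plain refcount field (hypothetical names, not the driver's actual layout):

    #include <stdlib.h>

    struct bo {
        int refcount;
        /* ... backing storage ... */
    };

    static void bo_unref(struct bo *bo)
    {
        if (!bo)
            return;               /* NULL-safe: destroy paths skip the check */
        if (--bo->refcount == 0)
            free(bo);             /* last reference frees the buffer */
    }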
Example 2
void intel_queue_destroy(struct intel_queue *queue)
{
    if (queue->fence)
        intel_fence_destroy(queue->fence);

    intel_bo_unref(queue->atomic_bo);
    intel_bo_unref(queue->select_graphics_bo);
    intel_bo_unref(queue->select_compute_bo);

    intel_base_destroy(&queue->base);
}
Example 3
/**
 * Allocate and map the buffer for writing.
 */
static VkResult cmd_writer_alloc_and_map(struct intel_cmd *cmd,
                                         enum intel_cmd_writer_type which)
{
    struct intel_cmd_writer *writer = &cmd->writers[which];
    struct intel_bo *bo;

    bo = alloc_writer_bo(cmd->dev->winsys, which, writer->size);
    if (bo) {
        intel_bo_unref(writer->bo);
        writer->bo = bo;
    } else if (writer->bo) {
        /* reuse the old bo */
        cmd_writer_discard(cmd, which);
    } else {
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    writer->used = 0;
    writer->item_used = 0;

    writer->ptr = intel_bo_map(writer->bo, true);
    if (!writer->ptr) {
        return VK_ERROR_MEMORY_MAP_FAILED;
    }

    return VK_SUCCESS;
}
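Because the helper falls back to reusing the discarded old bo when a fresh allocation fails, a caller only has to propagate the VkResult. A hypothetical call site (the INTEL_CMD_WRITER_BATCH value and surrounding control flow are assumptions):

    VkResult res = cmd_writer_alloc_and_map(cmd, INTEL_CMD_WRITER_BATCH);
    if (res != VK_SUCCESS) {
        cmd_fail(cmd, res);   /* record the error on the command buffer */
        return;
    }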
Example 4
static struct intel_bo *queue_create_bo(struct intel_queue *queue,
                                        VkDeviceSize size,
                                        const void *cmd,
                                        size_t cmd_len)
{
    struct intel_bo *bo;
    void *ptr;

    bo = intel_winsys_alloc_bo(queue->dev->winsys,
            "queue bo", size, true);
    if (!bo)
        return NULL;

    if (!cmd_len)
        return bo;

    ptr = intel_bo_map(bo, true);
    if (!ptr) {
        intel_bo_unref(bo);
        return NULL;
    }

    memcpy(ptr, cmd, cmd_len);
    intel_bo_unmap(bo);

    return bo;
}
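queue_create_bo() doubles as a plain allocator and an allocate-and-fill helper: with cmd_len == 0 it returns the untouched bo, otherwise it maps, copies cmd_len bytes, and unmaps before returning. Both shapes appear in Example 6; condensed here for reference (counter_size stands in for the real size expression):

    /* empty bo, e.g. to back atomic counters */
    bo = queue_create_bo(queue, counter_size, NULL, 0);

    /* bo pre-filled with a small command batch */
    bo = queue_create_bo(queue, counter_size, ctx_init_cmd,
                         sizeof(ctx_init_cmd));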
Example 5
/**
 * Grow a mapped writer to at least \p new_size.  Failures are handled
 * silently.
 */
void cmd_writer_grow(struct intel_cmd *cmd,
                     enum intel_cmd_writer_type which,
                     size_t new_size)
{
    struct intel_cmd_writer *writer = &cmd->writers[which];
    struct intel_bo *new_bo;
    void *new_ptr;

    if (new_size < writer->size << 1)
        new_size = writer->size << 1;
    /* STATE_BASE_ADDRESS requires page-aligned buffers */
    new_size = u_align(new_size, 4096);

    new_bo = alloc_writer_bo(cmd->dev->winsys, which, new_size);
    if (!new_bo) {
        cmd_writer_discard(cmd, which);
        cmd_fail(cmd, VK_ERROR_OUT_OF_DEVICE_MEMORY);
        return;
    }

    /* map and copy the data over */
    new_ptr = intel_bo_map(new_bo, true);
    if (!new_ptr) {
        intel_bo_unref(new_bo);
        cmd_writer_discard(cmd, which);
        cmd_fail(cmd, VK_ERROR_MEMORY_MAP_FAILED);
        return;
    }

    memcpy(new_ptr, writer->ptr, writer->used);

    intel_bo_unmap(writer->bo);
    intel_bo_unref(writer->bo);

    writer->size = new_size;
    writer->bo = new_bo;
    writer->ptr = new_ptr;
}
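To make the growth policy concrete: with writer->size == 8192, a request for 9000 bytes is bumped to the doubled size 16384, which the u_align() call leaves untouched since it is already a multiple of 4096; a request for 100000 bytes exceeds the doubling and is instead rounded up to 102400.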
Example 6
static VkResult queue_init_hw_and_atomic_bo(struct intel_queue *queue)
{
    const uint32_t ctx_init_cmd[] = {
        /* STATE_SIP */
        GEN6_RENDER_CMD(COMMON, STATE_SIP),
        0,
        /* PIPELINE_SELECT */
        GEN6_RENDER_CMD(SINGLE_DW, PIPELINE_SELECT) |
            GEN6_PIPELINE_SELECT_DW0_SELECT_3D,
        /* 3DSTATE_VF_STATISTICS */
        GEN6_RENDER_CMD(SINGLE_DW, 3DSTATE_VF_STATISTICS),
        /* end */
        GEN6_MI_CMD(MI_BATCH_BUFFER_END),
        GEN6_MI_CMD(MI_NOOP),
    };
    struct intel_bo *bo;
    VkResult ret;

    if (queue->ring != INTEL_RING_RENDER) {
        queue->last_pipeline_select = -1;
        queue->atomic_bo = queue_create_bo(queue,
                sizeof(uint32_t) * INTEL_QUEUE_ATOMIC_COUNTER_COUNT,
                NULL, 0);
        return (queue->atomic_bo) ? VK_SUCCESS : VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    bo = queue_create_bo(queue,
            sizeof(uint32_t) * INTEL_QUEUE_ATOMIC_COUNTER_COUNT,
            ctx_init_cmd, sizeof(ctx_init_cmd));
    if (!bo)
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;

    ret = queue_submit_bo(queue, bo, sizeof(ctx_init_cmd));
    if (ret != VK_SUCCESS) {
        intel_bo_unref(bo);
        return ret;
    }

    queue->last_pipeline_select = GEN6_PIPELINE_SELECT_DW0_SELECT_3D;
    /* the bo was sized for the atomic counters; reuse it now that the
     * init commands have been submitted */
    queue->atomic_bo = bo;

    return VK_SUCCESS;
}
Example 7
/**
 * Flush the command parser and execute the commands.  When the parser buffer
 * is empty, the callback is not invoked.
 */
void
ilo_cp_submit_internal(struct ilo_cp *cp)
{
   const bool do_exec = !(ilo_debug & ILO_DEBUG_NOHW);
   struct intel_bo *bo;
   unsigned used;
   int err;

   bo = ilo_cp_end_batch(cp, &used);
   if (!bo)
      return;

   if (likely(do_exec)) {
      err = intel_winsys_submit_bo(cp->winsys, cp->ring,
            bo, used, cp->render_ctx, cp->one_off_flags);
   } else {
      err = 0;
   }

   cp->one_off_flags = 0;

   if (!err) {
      bool guilty;

      intel_bo_unref(cp->last_submitted_bo);
      cp->last_submitted_bo = intel_bo_ref(bo);

      guilty = ilo_cp_detect_hang(cp);

      if (unlikely((ilo_debug & ILO_DEBUG_BATCH) || guilty)) {
         ilo_builder_decode(&cp->builder);
         if (guilty)
            abort();
      }

      if (cp->submit_callback)
         cp->submit_callback(cp, cp->submit_callback_data);
   }

   ilo_builder_begin(&cp->builder);
}
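The final callback gives the owning context a hook that runs only after a successful submission. Its signature follows from the call above; a hedged sketch of wiring it up by direct field assignment (the callback body and ctx pointer are illustrative):

    static void on_submit(struct ilo_cp *cp, void *data)
    {
        /* e.g., invalidate state that lived in the submitted batch */
    }

    cp->submit_callback = on_submit;
    cp->submit_callback_data = ctx;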
Example 8
static bool
resize_scratch_space(struct ilo_render *render,
                     struct ilo_render_scratch_space *scratch,
                     const char *name, int new_size)
{
   struct intel_bo *bo;

   if (scratch->size >= new_size)
      return true;

   bo = intel_winsys_alloc_bo(render->builder->winsys, name, new_size, false);
   if (!bo)
      return false;

   intel_bo_unref(scratch->bo);
   scratch->bo = bo;
   scratch->size = new_size;

   return true;
}
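resize_scratch_space() is grow-only: it returns true immediately when the current bo already covers new_size, so it is cheap to call on every state change, and on allocation failure the old bo is kept. A hypothetical call against the fields from Example 1 (scratch_size is an assumed size expression):

    if (!resize_scratch_space(render, &render->fs_scratch,
                              "fs scratch", scratch_size))
        return false;   /* old, smaller scratch bo remains usable */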
Example 9
void intel_dev_destroy(struct intel_dev *dev)
{
    struct intel_gpu *gpu = dev->gpu;
    uint32_t i;

    for (i = 0; i < ARRAY_SIZE(dev->queues); i++) {
        if (dev->queues[i])
            intel_queue_destroy(dev->queues[i]);
    }

    if (dev->desc_region)
        intel_desc_region_destroy(dev, dev->desc_region);

    dev_destroy_meta_shaders(dev);

    intel_bo_unref(dev->cmd_scratch_bo);

    intel_base_destroy(&dev->base);

    if (gpu->winsys)
        intel_gpu_cleanup_winsys(gpu);
}
Example 10
/**
 * Free all resources used by a writer.  Note that the initial size is not
 * reset.
 */
static void cmd_writer_reset(struct intel_cmd *cmd,
                             enum intel_cmd_writer_type which)
{
    struct intel_cmd_writer *writer = &cmd->writers[which];

    if (writer->ptr) {
        intel_bo_unmap(writer->bo);
        writer->ptr = NULL;
    }

    intel_bo_unref(writer->bo);
    writer->bo = NULL;

    writer->used = 0;

    writer->sba_offset = 0;

    if (writer->items) {
        intel_free(cmd, writer->items);
        writer->items = NULL;
        writer->item_alloc = 0;
        writer->item_used = 0;
    }
}
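Note the ordering: the writer is unmapped before its last reference is dropped, the same unmap-then-unref sequence Example 5 uses when replacing the old bo:

    intel_bo_unmap(writer->bo);   /* release the CPU mapping first */
    intel_bo_unref(writer->bo);   /* then drop the reference */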
Example 11
void intel_mem_free(struct intel_mem *mem)
{
    intel_bo_unref(mem->bo);

    intel_base_destroy(&mem->base);
}