/**
 * Create a bo of \p size bytes, optionally seeded with \p cmd_len bytes of
 * command data.
 */
static struct intel_bo *
queue_create_bo(struct intel_queue *queue,
                VkDeviceSize size,
                const void *cmd,
                size_t cmd_len)
{
   struct intel_bo *bo;
   void *ptr;

   bo = intel_winsys_alloc_bo(queue->dev->winsys, "queue bo", size, true);
   if (!bo)
      return NULL;

   if (!cmd_len)
      return bo;

   ptr = intel_bo_map(bo, true);
   if (!ptr) {
      intel_bo_unref(bo);
      return NULL;
   }

   memcpy(ptr, cmd, cmd_len);
   intel_bo_unmap(bo);

   return bo;
}
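/*
 * Usage sketch (not from the source): create a tiny bo holding a single
 * MI_BATCH_BUFFER_END (0x0a << 23), padded to a qword with an MI_NOOP, so
 * the result can be submitted as a no-op batch.  The helper name is
 * hypothetical; queue_create_bo() is the function above.
 */
static struct intel_bo *
queue_create_noop_batch_bo(struct intel_queue *queue)
{
   const uint32_t cmd[2] = { 0x0a << 23, 0 /* MI_NOOP pad */ };

   return queue_create_bo(queue, sizeof(cmd), cmd, sizeof(cmd));
}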
/**
 * Unmap the buffer for submission.
 */
static void
cmd_writer_unmap(struct intel_cmd *cmd,
                 enum intel_cmd_writer_type which)
{
   struct intel_cmd_writer *writer = &cmd->writers[which];

   intel_bo_unmap(writer->bo);
   writer->ptr = NULL;
}
void
intel_winsys_decode_bo(struct intel_winsys *winsys,
                       struct intel_bo *bo, int used)
{
   void *ptr;

   ptr = intel_bo_map(bo, false);
   if (!ptr) {
      debug_printf("failed to map buffer for decoding\n");
      return;
   }

   pipe_mutex_lock(winsys->mutex);

   if (!winsys->decode) {
      winsys->decode = drm_intel_decode_context_alloc(winsys->info.devid);
      if (!winsys->decode) {
         pipe_mutex_unlock(winsys->mutex);
         intel_bo_unmap(bo);
         return;
      }

      /* debug_printf()/debug_error() uses stderr by default */
      drm_intel_decode_set_output_file(winsys->decode, stderr);
   }

   /* in dwords */
   used /= 4;

   drm_intel_decode_set_batch_pointer(winsys->decode, ptr,
         gem_bo(bo)->offset64, used);
   drm_intel_decode(winsys->decode);

   pipe_mutex_unlock(winsys->mutex);

   intel_bo_unmap(bo);
}
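/*
 * Usage sketch (assumption): decode a freshly built batch for debugging.
 * Note that \p used is in bytes; intel_winsys_decode_bo() converts it to
 * dwords internally.  "dump_batch" is a hypothetical helper.
 */
static void
dump_batch(struct intel_winsys *winsys, struct intel_bo *bo, int used_dwords)
{
   intel_winsys_decode_bo(winsys, bo, used_dwords * 4);
}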
/**
 * Destroy the command parser.
 */
void
ilo_cp_destroy(struct ilo_cp *cp)
{
   if (cp->bo) {
      if (!cp->sys)
         intel_bo_unmap(cp->bo);

      intel_bo_unreference(cp->bo);
   }

   if (cp->render_ctx)
      intel_winsys_destroy_context(cp->winsys, cp->render_ctx);

   FREE(cp->sys);
   FREE(cp);
}
/**
 * Upload the parser buffer to the bo.
 */
static int
ilo_cp_upload_buffer(struct ilo_cp *cp)
{
   int err;

   if (!cp->sys) {
      intel_bo_unmap(cp->bo);
      return 0;
   }

   /* cp->used, cp->bo_size, and cp->stolen are in dwords */
   err = intel_bo_pwrite(cp->bo, 0, cp->used * 4, cp->ptr);
   if (likely(!err && cp->stolen)) {
      const int offset = cp->bo_size - cp->stolen;

      err = intel_bo_pwrite(cp->bo, offset * 4,
            cp->stolen * 4, &cp->ptr[offset]);
   }

   return err;
}
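/*
 * Worked example (assumption): with bo_size = 2048 dwords and stolen = 6,
 * the second pwrite above copies dwords [2042, 2048) of cp->ptr to byte
 * offset 2042 * 4 = 8168, i.e. the stolen dwords occupy the very tail of
 * the bo while the batch proper starts at offset 0.
 */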
/**
 * Decode the written items of a writer for debugging.
 */
static void
ilo_builder_writer_decode_items(struct ilo_builder *builder,
                                enum ilo_builder_writer_type which)
{
   struct ilo_builder_writer *writer = &builder->writers[which];
   int i;

   if (!writer->item_used)
      return;

   writer->ptr = intel_bo_map(writer->bo, false);
   if (!writer->ptr)
      return;

   for (i = 0; i < writer->item_used; i++) {
      const struct ilo_builder_item *item = &writer->items[i];

      writer_decode_table[item->type].func(builder, which, item);
   }

   intel_bo_unmap(writer->bo);
   /* do not leave a dangling pointer to the now-unmapped bo */
   writer->ptr = NULL;
}
/**
 * Grow a mapped writer to at least \p new_size.  Failures are handled
 * silently.
 */
void
cmd_writer_grow(struct intel_cmd *cmd,
                enum intel_cmd_writer_type which,
                size_t new_size)
{
   struct intel_cmd_writer *writer = &cmd->writers[which];
   struct intel_bo *new_bo;
   void *new_ptr;

   /* grow by at least 2x */
   if (new_size < writer->size << 1)
      new_size = writer->size << 1;
   /* STATE_BASE_ADDRESS requires page-aligned buffers */
   new_size = u_align(new_size, 4096);

   new_bo = alloc_writer_bo(cmd->dev->winsys, which, new_size);
   if (!new_bo) {
      cmd_writer_discard(cmd, which);
      cmd_fail(cmd, VK_ERROR_OUT_OF_DEVICE_MEMORY);
      return;
   }

   /* map and copy the data over */
   new_ptr = intel_bo_map(new_bo, true);
   if (!new_ptr) {
      intel_bo_unref(new_bo);
      cmd_writer_discard(cmd, which);
      cmd_fail(cmd, VK_ERROR_VALIDATION_FAILED_EXT);
      return;
   }

   memcpy(new_ptr, writer->ptr, writer->used);

   intel_bo_unmap(writer->bo);
   intel_bo_unref(writer->bo);

   writer->size = new_size;
   writer->bo = new_bo;
   writer->ptr = new_ptr;
}
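/*
 * Usage sketch (assumption): a reserve helper would grow the writer only
 * when the next emit would overflow it.  "cmd_writer_reserve" is a
 * hypothetical name; cmd_writer_grow() is the function above, and
 * writer->used and writer->size are in bytes as implied by the memcpy().
 */
static void
cmd_writer_reserve(struct intel_cmd *cmd,
                   enum intel_cmd_writer_type which,
                   size_t len)
{
   struct intel_cmd_writer *writer = &cmd->writers[which];

   if (writer->used + len > writer->size)
      cmd_writer_grow(cmd, which, writer->used + len);
}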
/**
 * Free all resources used by a writer.  Note that the initial size is not
 * reset.
 */
static void
cmd_writer_reset(struct intel_cmd *cmd,
                 enum intel_cmd_writer_type which)
{
   struct intel_cmd_writer *writer = &cmd->writers[which];

   if (writer->ptr) {
      intel_bo_unmap(writer->bo);
      writer->ptr = NULL;
   }

   intel_bo_unref(writer->bo);
   writer->bo = NULL;

   writer->used = 0;
   writer->sba_offset = 0;

   if (writer->items) {
      intel_free(cmd, writer->items);
      writer->items = NULL;
      writer->item_alloc = 0;
      writer->item_used = 0;
   }
}
/**
 * Process the bo and accumulate the result.  The bo is emptied.
 */
static void
query_process_bo(const struct ilo_context *ilo, struct ilo_query *q)
{
   const uint64_t *vals;
   uint64_t tmp;
   int i;

   if (!q->used)
      return;

   vals = intel_bo_map(q->bo, false);
   if (!vals) {
      q->used = 0;
      return;
   }

   switch (q->type) {
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_TIME_ELAPSED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      assert(q->stride == sizeof(*vals) * 2);

      tmp = 0;
      for (i = 0; i < q->used; i++)
         tmp += vals[2 * i + 1] - vals[2 * i];

      if (q->type == PIPE_QUERY_TIME_ELAPSED)
         tmp = query_timestamp_to_ns(ilo, tmp);

      q->result.u64 += tmp;
      break;
   case PIPE_QUERY_TIMESTAMP:
      assert(q->stride == sizeof(*vals));
      q->result.u64 = query_timestamp_to_ns(ilo, vals[q->used - 1]);
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      assert(q->stride == sizeof(*vals) * 22);

      for (i = 0; i < q->used; i++) {
         struct pipe_query_data_pipeline_statistics *stats =
            &q->result.pipeline_statistics;
         const uint64_t *begin = vals + 22 * i;
         const uint64_t *end = begin + 11;

         stats->ia_vertices += end[0] - begin[0];
         stats->ia_primitives += end[1] - begin[1];
         stats->vs_invocations += end[2] - begin[2];
         stats->gs_invocations += end[3] - begin[3];
         stats->gs_primitives += end[4] - begin[4];
         stats->c_invocations += end[5] - begin[5];
         stats->c_primitives += end[6] - begin[6];
         stats->ps_invocations += end[7] - begin[7];
         stats->hs_invocations += end[8] - begin[8];
         stats->ds_invocations += end[9] - begin[9];
         stats->cs_invocations += end[10] - begin[10];
      }
      break;
   default:
      break;
   }

   intel_bo_unmap(q->bo);

   q->used = 0;
}
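/*
 * Worked example (assumption): the pair-accumulating cases read (begin,
 * end) counter snapshots.  With q->used == 2 and mapped values
 * { 100, 140, 200, 230 }, the loop accumulates
 * (140 - 100) + (230 - 200) = 70 into q->result.u64.
 */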
static void
draw_vbo_with_sw_restart(struct ilo_context *ilo,
                         const struct pipe_draw_info *info)
{
   const struct ilo_ib_state *ib = &ilo->state_vector.ib;
   const struct ilo_vma *vma;
   union {
      const void *ptr;
      const uint8_t *u8;
      const uint16_t *u16;
      const uint32_t *u32;
   } u;

   /* we will draw with IB mapped */
   if (ib->state.buffer) {
      vma = ilo_resource_get_vma(ib->state.buffer);
      u.ptr = intel_bo_map(vma->bo, false);
      if (u.ptr)
         u.u8 += vma->bo_offset + ib->state.offset;
   } else {
      vma = NULL;
      u.ptr = ib->state.user_buffer;
   }

   if (!u.ptr)
      return;

#define DRAW_VBO_WITH_SW_RESTART(pipe, info, ptr) do {    \
   const unsigned end = (info)->start + (info)->count;    \
   struct pipe_draw_info subinfo;                         \
   unsigned i;                                            \
                                                          \
   subinfo = *(info);                                     \
   subinfo.primitive_restart = false;                     \
   for (i = (info)->start; i < end; i++) {                \
      if ((ptr)[i] == (info)->restart_index) {            \
         subinfo.count = i - subinfo.start;               \
         if (subinfo.count)                               \
            (pipe)->draw_vbo(pipe, &subinfo);             \
         subinfo.start = i + 1;                           \
      }                                                   \
   }                                                      \
   subinfo.count = i - subinfo.start;                     \
   if (subinfo.count)                                     \
      (pipe)->draw_vbo(pipe, &subinfo);                   \
} while (0)

   switch (ib->state.index_size) {
   case 1:
      DRAW_VBO_WITH_SW_RESTART(&ilo->base, info, u.u8);
      break;
   case 2:
      DRAW_VBO_WITH_SW_RESTART(&ilo->base, info, u.u16);
      break;
   case 4:
      DRAW_VBO_WITH_SW_RESTART(&ilo->base, info, u.u32);
      break;
   default:
      assert(!"unsupported index size");
      break;
   }

#undef DRAW_VBO_WITH_SW_RESTART

   if (vma)
      intel_bo_unmap(vma->bo);
}
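/*
 * Worked example (assumption): with 8-bit indices { 0, 1, 2, R, 3, 4, 5 }
 * where R == info->restart_index, start == 0, and count == 7, the macro
 * above issues two draws, start=0/count=3 and start=4/count=3, with
 * primitive_restart forced off in each subinfo.
 */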