int
intel_bo_set_tiling(struct intel_bo *bo, enum intel_tiling_mode tiling,
                    unsigned long pitch)
{
   uint32_t real_tiling = tiling;
   int err;

   switch (tiling) {
   case INTEL_TILING_X:
      if (pitch % 512)
         return -1;
      break;
   case INTEL_TILING_Y:
      if (pitch % 128)
         return -1;
      break;
   default:
      break;
   }

   err = drm_intel_bo_set_tiling(gem_bo(bo), &real_tiling, pitch);
   if (err || real_tiling != tiling) {
      assert(!"tiling mismatch");
      return -1;
   }

   return 0;
}

void
intel_bo_unmap(struct intel_bo *bo)
{
   int err;

   err = drm_intel_bo_unmap(gem_bo(bo));
   assert(!err);
}

struct intel_bo *
intel_bo_ref(struct intel_bo *bo)
{
   if (bo)
      drm_intel_bo_reference(gem_bo(bo));

   return bo;
}
int
intel_bo_wait(struct intel_bo *bo, int64_t timeout)
{
   int err;

   if (timeout >= 0) {
      err = drm_intel_gem_bo_wait(gem_bo(bo), timeout);
   } else {
      drm_intel_bo_wait_rendering(gem_bo(bo));
      err = 0;
   }

   /* consider the bo idle on errors */
   if (err && err != -ETIME)
      err = 0;

   return err;
}
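/*
 * Usage sketch (not part of the winsys API itself): a caller could pass a
 * zero timeout as a non-blocking busy check and a negative timeout to block
 * until rendering completes.  "bo" below is a hypothetical caller-owned
 * buffer object.
 *
 *   if (intel_bo_wait(bo, 0) == -ETIME) {
 *      // still busy; block until the GPU is done with it
 *      intel_bo_wait(bo, -1);
 *   }
 */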
int
intel_winsys_export_handle(struct intel_winsys *winsys,
                           struct intel_bo *bo,
                           enum intel_tiling_mode tiling,
                           unsigned long pitch,
                           unsigned long height,
                           struct winsys_handle *handle)
{
   int err = 0;

   switch (handle->type) {
   case DRM_API_HANDLE_TYPE_SHARED:
      {
         uint32_t name;

         err = drm_intel_bo_flink(gem_bo(bo), &name);
         if (!err)
            handle->handle = name;
      }
      break;
   case DRM_API_HANDLE_TYPE_KMS:
      handle->handle = gem_bo(bo)->handle;
      break;
   case DRM_API_HANDLE_TYPE_FD:
      {
         int fd;

         err = drm_intel_bo_gem_export_to_prime(gem_bo(bo), &fd);
         if (!err)
            handle->handle = fd;
      }
      break;
   default:
      err = -EINVAL;
      break;
   }

   if (err)
      return err;

   handle->stride = pitch;

   return 0;
}
int
intel_winsys_submit_bo(struct intel_winsys *winsys,
                       enum intel_ring_type ring,
                       struct intel_bo *bo, int used,
                       struct intel_context *ctx,
                       unsigned long flags)
{
   const unsigned long exec_flags = (unsigned long) ring | flags;

   /* logical contexts are only available for the render ring */
   if (ring != INTEL_RING_RENDER)
      ctx = NULL;

   if (ctx) {
      return drm_intel_gem_bo_context_exec(gem_bo(bo),
            (drm_intel_context *) ctx, used, exec_flags);
   }
   else {
      return drm_intel_bo_mrb_exec(gem_bo(bo),
            used, NULL, 0, 0, exec_flags);
   }
}
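/*
 * Usage sketch (assumption, not taken from this file): submitting a filled
 * batch bo on the render ring with a logical context.  "winsys", "batch",
 * and "ctx" are hypothetical caller-side names; "used" is the number of
 * bytes written into the batch.
 *
 *   int err = intel_winsys_submit_bo(winsys, INTEL_RING_RENDER,
 *                                    batch->bo, used, ctx, 0);
 */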
void *
intel_bo_map_gtt_async(struct intel_bo *bo)
{
   int err;

   err = drm_intel_gem_bo_map_unsynchronized(gem_bo(bo));
   if (err) {
      debug_error("failed to map bo");
      return NULL;
   }

   return gem_bo(bo)->virtual;
}
int
intel_bo_add_reloc(struct intel_bo *bo, uint32_t offset,
                   struct intel_bo *target_bo, uint32_t target_offset,
                   uint32_t flags, uint64_t *presumed_offset)
{
   uint32_t read_domains, write_domain;
   int err;

   if (flags & INTEL_RELOC_WRITE) {
      /*
       * Because of the translation to domains, INTEL_RELOC_GGTT should only
       * be set on GEN6 when the bo is written by MI_* or PIPE_CONTROL.  The
       * kernel will translate it back to INTEL_RELOC_GGTT.
       */
      write_domain = (flags & INTEL_RELOC_GGTT) ?
         I915_GEM_DOMAIN_INSTRUCTION : I915_GEM_DOMAIN_RENDER;
      read_domains = write_domain;
   } else {
      write_domain = 0;
      read_domains = I915_GEM_DOMAIN_RENDER |
                     I915_GEM_DOMAIN_SAMPLER |
                     I915_GEM_DOMAIN_INSTRUCTION |
                     I915_GEM_DOMAIN_VERTEX;
   }

   if (flags & INTEL_RELOC_FENCE) {
      err = drm_intel_bo_emit_reloc_fence(gem_bo(bo), offset,
            gem_bo(target_bo), target_offset,
            read_domains, write_domain);
   } else {
      err = drm_intel_bo_emit_reloc(gem_bo(bo), offset,
            gem_bo(target_bo), target_offset,
            read_domains, write_domain);
   }

   *presumed_offset = gem_bo(target_bo)->offset64 + target_offset;

   return err;
}
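/*
 * Usage sketch (assumption): while building a batch, a caller might record
 * a write relocation to a render target and emit the presumed address into
 * the command stream.  "batch", "target", and "cur_offset" (a byte offset
 * into the batch) are hypothetical caller-side names.
 *
 *   uint64_t presumed;
 *   intel_bo_add_reloc(batch->bo, cur_offset, target, 0,
 *                      INTEL_RELOC_WRITE, &presumed);
 *   // write the lower 32 bits of the presumed address into the batch
 *   batch->ptr[cur_offset / 4] = (uint32_t) presumed;
 */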
void *
intel_bo_map(struct intel_bo *bo, bool write_enable)
{
   int err;

   err = drm_intel_bo_map(gem_bo(bo), write_enable);
   if (err) {
      debug_error("failed to map bo");
      return NULL;
   }

   return gem_bo(bo)->virtual;
}
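/*
 * Usage sketch (assumption): a typical map/write/unmap sequence.  "bo",
 * "data", and "size" are hypothetical caller-side names.
 *
 *   void *ptr = intel_bo_map(bo, true);
 *   if (ptr) {
 *      memcpy(ptr, data, size);
 *      intel_bo_unmap(bo);
 *   }
 */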
void
intel_winsys_decode_bo(struct intel_winsys *winsys,
                       struct intel_bo *bo, int used)
{
   void *ptr;

   ptr = intel_bo_map(bo, false);
   if (!ptr) {
      debug_printf("failed to map buffer for decoding\n");
      return;
   }

   pipe_mutex_lock(winsys->mutex);
   if (!winsys->decode) {
      winsys->decode = drm_intel_decode_context_alloc(winsys->info.devid);
      if (!winsys->decode) {
         pipe_mutex_unlock(winsys->mutex);
         intel_bo_unmap(bo);
         return;
      }

      /* debug_printf()/debug_error() uses stderr by default */
      drm_intel_decode_set_output_file(winsys->decode, stderr);
   }

   /* in dwords */
   used /= 4;

   drm_intel_decode_set_batch_pointer(winsys->decode,
         ptr, gem_bo(bo)->offset64, used);

   drm_intel_decode(winsys->decode);
   pipe_mutex_unlock(winsys->mutex);

   intel_bo_unmap(bo);
}
bool
intel_bo_has_reloc(struct intel_bo *bo, struct intel_bo *target_bo)
{
   return drm_intel_bo_references(gem_bo(bo), gem_bo(target_bo));
}

void
intel_bo_truncate_relocs(struct intel_bo *bo, int start)
{
   drm_intel_gem_bo_clear_relocs(gem_bo(bo), start);
}

int
intel_bo_get_reloc_count(struct intel_bo *bo)
{
   return drm_intel_gem_bo_get_reloc_count(gem_bo(bo));
}

int
intel_bo_pread(struct intel_bo *bo, unsigned long offset,
               unsigned long size, void *data)
{
   return drm_intel_bo_get_subdata(gem_bo(bo), offset, size, data);
}

int
intel_bo_pwrite(struct intel_bo *bo, unsigned long offset,
                unsigned long size, const void *data)
{
   return drm_intel_bo_subdata(gem_bo(bo), offset, size, data);
}

void
intel_bo_unreference(struct intel_bo *bo)
{
   drm_intel_bo_unreference(gem_bo(bo));
}