static boolean radeon_get_drm_value(int fd, unsigned request,
                                    const char *errname, uint32_t *out)
{
    struct drm_radeon_info info;
    int retval;

    memset(&info, 0, sizeof(info));

    info.value = (unsigned long)out;
    info.request = request;

    retval = drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
    if (retval) {
        if (errname) {
            fprintf(stderr, "radeon: Failed to get %s, error number %d\n",
                    errname, retval);
        }
        return FALSE;
    }
    return TRUE;
}
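/*
 * Usage sketch (hypothetical helper): query the GB pipe count through the
 * wrapper above. RADEON_INFO_NUM_GB_PIPES is a real drm_radeon_info
 * request; the error string is arbitrary.
 */
static uint32_t example_num_gb_pipes(int fd)
{
    uint32_t pipes = 0;

    if (!radeon_get_drm_value(fd, RADEON_INFO_NUM_GB_PIPES,
                              "num-gb-pipes", &pipes))
        return 0;

    return pipes;
}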
int vmw_ioctl_fence_signalled(struct vmw_winsys_screen *vws,
                              uint32_t handle, uint32_t flags)
{
    struct drm_vmw_fence_signaled_arg arg;
    uint32_t vflags = vmw_drm_fence_flags(flags);
    int ret;

    memset(&arg, 0, sizeof(arg));
    arg.handle = handle;
    arg.flags = vflags;

    ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_FENCE_SIGNALED,
                              &arg, sizeof(arg));
    if (ret != 0)
        return ret;

    return (arg.signaled) ? 0 : -1;
}
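/*
 * Usage sketch (hypothetical helper): poll the fence with a bounded retry
 * loop, mapping the 0 / -1 convention above to signalled / still-pending.
 * Assumes <unistd.h> and <errno.h>; the 1 ms interval and retry count are
 * arbitrary.
 */
static int example_poll_fence(struct vmw_winsys_screen *vws, uint32_t handle)
{
    int tries = 100;

    while (tries--) {
        int ret = vmw_ioctl_fence_signalled(vws, handle, 0);

        if (ret == 0)
            return 0;      /* signalled */
        if (ret != -1)
            return ret;    /* ioctl failure (negative errno) */

        usleep(1000);      /* still pending, retry */
    }
    return -ETIME;
}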
int drm_vc4_bo_set_flags(struct drm_vc4_bo *bo, uint32_t flags)
{
#if 0
    /* Disabled: the SET_FLAGS ioctl and its argument struct do not appear
     * to be wired up in the vc4 UAPI; the struct name here must match the
     * set-flags ioctl once it exists. Check bo before dereferencing it. */
    struct drm_vc4_gem_set_flags args;
    struct drm_vc4 *drm;
    int err;

    if (!bo)
        return -EINVAL;

    drm = bo->drm;

    memset(&args, 0, sizeof(args));
    args.handle = bo->handle;
    args.flags = flags;

    err = drmCommandWriteRead(drm->fd, DRM_VC4_GEM_SET_FLAGS,
                              &args, sizeof(args));
    if (err < 0)
        return -errno;
#endif
    return 0;
}
/**
 * Release command submission context
 *
 * \param   context - \c [in] amdgpu context handle
 *
 * \return  0 on success otherwise POSIX Error code
 */
int amdgpu_cs_ctx_free(amdgpu_context_handle context)
{
    union drm_amdgpu_ctx args;
    int r;

    if (NULL == context)
        return -EINVAL;

    pthread_mutex_destroy(&context->sequence_mutex);

    /* now deal with kernel side */
    memset(&args, 0, sizeof(args));
    args.in.op = AMDGPU_CTX_OP_FREE_CTX;
    args.in.ctx_id = context->id;

    r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CTX,
                            &args, sizeof(args));

    free(context);

    return r;
}
int pscnv_gem_info(int fd, uint32_t handle, uint32_t *cookie, uint32_t *flags,
                   uint32_t *tile_flags, uint64_t *size, uint64_t *map_handle,
                   uint32_t *user)
{
    struct drm_pscnv_gem_info req;
    int ret;

    /* zero the request so no stale stack data reaches the kernel */
    memset(&req, 0, sizeof(req));
    req.handle = handle;

    ret = drmCommandWriteRead(fd, DRM_PSCNV_GEM_INFO, &req, sizeof(req));
    if (ret)
        return ret;

    if (cookie)
        *cookie = req.cookie;
    if (flags)
        *flags = req.flags;
    if (tile_flags)
        *tile_flags = req.tile_flags;
    if (size)
        *size = req.size;
    if (map_handle)
        *map_handle = req.map_handle;
    if (user)
        memcpy(user, req.user, sizeof(req.user));

    return 0;
}
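/*
 * Usage sketch (hypothetical helper): fetch a BO's size and map handle,
 * then mmap() it through the DRM fd. Treating map_handle as the mmap
 * offset is an assumption based on the field name and the usual GEM
 * pattern; assumes <sys/mman.h>.
 */
static void *example_map_pscnv_bo(int fd, uint32_t handle)
{
    uint64_t size = 0, map_handle = 0;
    void *ptr;

    if (pscnv_gem_info(fd, handle, NULL, NULL, NULL,
                       &size, &map_handle, NULL))
        return NULL;

    ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
               fd, map_handle);
    return (ptr == MAP_FAILED) ? NULL : ptr;
}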
int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
                          uint32_t number_of_resources,
                          amdgpu_bo_handle *resources,
                          uint8_t *resource_prios)
{
    struct drm_amdgpu_bo_list_entry *list;
    union drm_amdgpu_bo_list args;
    unsigned i;
    int r;

    if (!number_of_resources)
        return -EINVAL;

    /* overflow check for multiplication */
    if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
        return -EINVAL;

    list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
    if (list == NULL)
        return -ENOMEM;

    memset(&args, 0, sizeof(args));
    args.in.operation = AMDGPU_BO_LIST_OP_UPDATE;
    args.in.list_handle = handle->handle;
    args.in.bo_number = number_of_resources;
    args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
    args.in.bo_info_ptr = (uintptr_t)list;

    for (i = 0; i < number_of_resources; i++) {
        list[i].bo_handle = resources[i]->handle;
        if (resource_prios)
            list[i].bo_priority = resource_prios[i];
        else
            list[i].bo_priority = 0;
    }

    r = drmCommandWriteRead(handle->dev->fd, DRM_AMDGPU_BO_LIST,
                            &args, sizeof(args));
    free(list);
    return r;
}
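/*
 * Usage sketch (hypothetical helper): rebuild a two-entry list with a
 * raised priority on the second BO. The interpretation of bo_priority
 * values is an assumption here; the kernel's amdgpu_drm.h defines the
 * authoritative range.
 */
static int example_reprioritize(amdgpu_bo_list_handle list,
                                amdgpu_bo_handle a, amdgpu_bo_handle b)
{
    amdgpu_bo_handle bos[2] = { a, b };
    uint8_t prios[2] = { 0, 8 };

    return amdgpu_bo_list_update(list, 2, bos, prios);
}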
int drm_tegra_bo_set_tiling(struct drm_tegra_bo *bo,
                            const struct drm_tegra_bo_tiling *tiling)
{
    struct drm_tegra_gem_set_tiling args;
    struct drm_tegra *drm;
    int err;

    /* check bo before dereferencing it */
    if (!bo)
        return -EINVAL;

    drm = bo->drm;

    memset(&args, 0, sizeof(args));
    args.handle = bo->handle;
    args.mode = tiling->mode;
    args.value = tiling->value;

    err = drmCommandWriteRead(drm->fd, DRM_TEGRA_GEM_SET_TILING,
                              &args, sizeof(args));
    if (err < 0)
        return -errno;

    return 0;
}
int drm_tegra_bo_get_flags(struct drm_tegra_bo *bo, uint32_t *flags)
{
    struct drm_tegra_gem_get_flags args;
    struct drm_tegra *drm;
    int err;

    /* check bo before dereferencing it */
    if (!bo)
        return -EINVAL;

    drm = bo->drm;

    memset(&args, 0, sizeof(args));
    args.handle = bo->handle;

    err = drmCommandWriteRead(drm->fd, DRM_TEGRA_GEM_GET_FLAGS,
                              &args, sizeof(args));
    if (err < 0)
        return -errno;

    if (flags)
        *flags = args.flags;

    return 0;
}
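/*
 * Usage sketch (hypothetical helper): read back and print a BO's flags.
 * Accessing bo->handle directly assumes this sits in the same file as the
 * struct definition; assumes <stdio.h>.
 */
static int example_print_flags(struct drm_tegra_bo *bo)
{
    uint32_t flags = 0;
    int err = drm_tegra_bo_get_flags(bo, &flags);

    if (!err)
        printf("tegra bo %u: flags 0x%08x\n", bo->handle, flags);

    return err;
}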
int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo, uint64_t timeout_ns,
                            bool *busy)
{
    union drm_amdgpu_gem_wait_idle args;
    int r;

    memset(&args, 0, sizeof(args));
    args.in.handle = bo->handle;
    args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);

    r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_WAIT_IDLE,
                            &args, sizeof(args));

    if (r == 0) {
        *busy = args.out.status;
        return 0;
    } else {
        fprintf(stderr, "amdgpu: GEM_WAIT_IDLE failed with %i\n", r);
        return r;
    }
}
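/*
 * Usage sketch (hypothetical helper): wait up to one second for a BO to
 * go idle and report whether it was still busy. Assumes <stdbool.h> and
 * <stdio.h>.
 */
static int example_wait_one_second(amdgpu_bo_handle bo)
{
    bool busy = false;
    int r = amdgpu_bo_wait_for_idle(bo, 1000000000ull /* 1 s in ns */, &busy);

    if (r == 0 && busy)
        fprintf(stderr, "amdgpu: BO still busy after 1 s timeout\n");

    return r;
}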
int DrmControl::getVideoExtCommand()
{
    if (mVideoExtCommand) {
        return mVideoExtCommand;
    }

    int fd = Hwcomposer::getInstance().getDrm()->getDrmFd();

    union drm_psb_extension_arg video_getparam_arg;
    strncpy(video_getparam_arg.extension, "lnc_video_getparam",
            sizeof(video_getparam_arg.extension));

    int ret = drmCommandWriteRead(fd, DRM_PSB_EXTENSION,
                                  &video_getparam_arg,
                                  sizeof(video_getparam_arg));
    if (ret != 0) {
        VLOGTRACE("failed to get video extension command");
        return 0;
    }

    mVideoExtCommand = video_getparam_arg.rep.driver_ioctl_offset;
    return mVideoExtCommand;
}
/* allocate a new (un-tiled) buffer object */
struct etna_bo *etna_bo_new(struct etna_device *dev,
                            uint32_t size, uint32_t flags)
{
    struct etna_bo *bo = NULL;
    struct drm_vivante_gem_new req = {
        .size = size,
        .flags = flags,   // TODO figure out proper flags..
    };
    int ret;

    ret = drmCommandWriteRead(dev->fd, DRM_VIVANTE_GEM_NEW,
                              &req, sizeof(req));
    if (ret)
        return NULL;

    pthread_mutex_lock(&table_lock);
    bo = bo_from_handle(dev, size, req.handle);
    pthread_mutex_unlock(&table_lock);

    return bo;
}
int intelEmitIrqLocked(struct intel_context *intel)
{
    drmI830IrqEmit ie;
    int ret, seq;

    assert(((*(int *)intel->driHwLock) & ~DRM_LOCK_CONT) ==
           (DRM_LOCK_HELD | intel->hHWContext));

    ie.irq_seq = &seq;

    ret = drmCommandWriteRead(intel->driFd, DRM_I830_IRQ_EMIT,
                              &ie, sizeof(ie));
    if (ret) {
        fprintf(stderr, "%s: drmI830IrqEmit: %d\n", __FUNCTION__, ret);
        exit(1);
    }

    DBG("%s --> %d\n", __FUNCTION__, seq);

    return seq;
}
/* allocate a buffer handle: */
drm_private int kgsl_bo_new_handle(struct fd_device *dev,
                                   uint32_t size, uint32_t flags,
                                   uint32_t *handle)
{
    struct drm_kgsl_gem_create req = {
        .size = size,
    };
    int ret;

    ret = drmCommandWriteRead(dev->fd, DRM_KGSL_GEM_CREATE,
                              &req, sizeof(req));
    if (ret)
        return ret;

    // TODO make flags match msm driver, since kgsl is legacy..
    // translate flags in kgsl..
    set_memtype(dev, req.handle, flags);

    *handle = req.handle;

    return 0;
}
int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
                           struct amdgpu_bo_metadata *info)
{
    struct drm_amdgpu_gem_metadata args = {};

    args.handle = bo->handle;
    args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
    args.data.flags = info->flags;
    args.data.tiling_info = info->tiling_info;

    if (info->size_metadata > sizeof(args.data.data))
        return -EINVAL;

    if (info->size_metadata) {
        args.data.data_size_bytes = info->size_metadata;
        memcpy(args.data.data, info->umd_metadata, info->size_metadata);
    }

    return drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_METADATA,
                               &args, sizeof(args));
}
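/*
 * Usage sketch (hypothetical helper): attach a small UMD blob to a BO.
 * The blob contents and zeroed flags/tiling_info are illustrative only;
 * assumes <string.h>.
 */
static int example_tag_bo(amdgpu_bo_handle bo)
{
    static const uint32_t blob[2] = { 0xdeadbeef, 0xcafe0001 };
    struct amdgpu_bo_metadata md;

    memset(&md, 0, sizeof(md));
    md.size_metadata = sizeof(blob);
    memcpy(md.umd_metadata, blob, sizeof(blob));

    return amdgpu_bo_set_metadata(bo, &md);
}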
int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
                                   void *cpu, uint64_t size,
                                   amdgpu_bo_handle *buf_handle)
{
    int r;
    struct amdgpu_bo *bo;
    struct drm_amdgpu_gem_userptr args;
    uintptr_t cpu0;
    uint32_t ps, off;

    memset(&args, 0, sizeof(args));
    ps = getpagesize();

    cpu0 = ROUND_DOWN((uintptr_t)cpu, ps);
    off = (uintptr_t)cpu - cpu0;
    size = ROUND_UP(size + off, ps);

    args.addr = cpu0;
    args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER;
    args.size = size;

    r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
                            &args, sizeof(args));
    if (r)
        return r;

    bo = calloc(1, sizeof(struct amdgpu_bo));
    if (!bo)
        return -ENOMEM;

    atomic_set(&bo->refcount, 1);
    bo->dev = dev;
    bo->alloc_size = size;
    bo->handle = args.handle;

    *buf_handle = bo;
    return r;
}
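/*
 * Usage sketch (hypothetical helper): pin an anonymous heap buffer. The
 * wrapper above page-aligns the range itself, but the memory must be
 * anonymous (AMDGPU_GEM_USERPTR_ANONONLY), so plain malloc() qualifies.
 * Assumes <stdlib.h> and <errno.h>.
 */
static int example_pin_user_mem(amdgpu_device_handle dev,
                                amdgpu_bo_handle *out)
{
    size_t len = 64 * 1024;
    void *cpu = malloc(len);
    int r;

    if (!cpu)
        return -ENOMEM;

    r = amdgpu_create_bo_from_user_mem(dev, cpu, len, out);
    if (r)
        free(cpu);   /* on success the BO keeps referencing cpu */

    return r;
}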
/* Flush the indirect buffer to the kernel for submission to the card */
void R600CPFlushIndirect(ScrnInfoPtr pScrn, drmBufPtr ib)
{
    drmBufPtr buffer = ib;
    int start = 0;
    drm_radeon_indirect_t indirect;
    int drmFD = RHDDRMFDGet(pScrn->scrnIndex);

    if (!buffer)
        return;

    while (buffer->used & 0x3c) {
        E32(buffer, CP_PACKET2()); /* fill up to multiple of 16 dwords */
    }

    indirect.idx = buffer->idx;
    indirect.start = start;
    indirect.end = buffer->used;
    indirect.discard = 1;

    drmCommandWriteRead(drmFD, DRM_RADEON_INDIRECT,
                        &indirect, sizeof(drm_radeon_indirect_t));
}
static void radeon_bo_get_tiling(struct pb_buffer *_buf,
                                 enum radeon_bo_layout *microtiled,
                                 enum radeon_bo_layout *macrotiled)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);
    struct drm_radeon_gem_set_tiling args = {};

    args.handle = bo->handle;

    drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_GET_TILING,
                        &args, sizeof(args));

    *microtiled = RADEON_LAYOUT_LINEAR;
    *macrotiled = RADEON_LAYOUT_LINEAR;
    if (args.tiling_flags & RADEON_BO_FLAGS_MICRO_TILE)
        *microtiled = RADEON_LAYOUT_TILED;

    if (args.tiling_flags & RADEON_BO_FLAGS_MACRO_TILE)
        *macrotiled = RADEON_LAYOUT_TILED;
}
/**
 * Create command submission context
 *
 * \param   dev - \c [in] amdgpu device handle
 * \param   context - \c [out] amdgpu context handle
 *
 * \return  0 on success otherwise POSIX Error code
 */
int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
                         amdgpu_context_handle *context)
{
    struct amdgpu_context *gpu_context;
    union drm_amdgpu_ctx args;
    int r;

    if (NULL == dev)
        return -EINVAL;
    if (NULL == context)
        return -EINVAL;

    gpu_context = calloc(1, sizeof(struct amdgpu_context));
    if (NULL == gpu_context)
        return -ENOMEM;

    gpu_context->dev = dev;

    r = pthread_mutex_init(&gpu_context->sequence_mutex, NULL);
    if (r)
        goto error;

    /* Create the context */
    memset(&args, 0, sizeof(args));
    args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
    r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CTX, &args, sizeof(args));
    if (r)
        goto error;

    gpu_context->id = args.out.alloc.ctx_id;
    *context = (amdgpu_context_handle)gpu_context;

    return 0;

error:
    pthread_mutex_destroy(&gpu_context->sequence_mutex);
    free(gpu_context);
    return r;
}
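/*
 * Usage sketch (hypothetical helper): create a context, submit work, and
 * free it, pairing amdgpu_cs_ctx_create with amdgpu_cs_ctx_free from
 * earlier in this section. "dev" comes from amdgpu_device_initialize().
 */
static int example_ctx_roundtrip(amdgpu_device_handle dev)
{
    amdgpu_context_handle ctx;
    int r;

    r = amdgpu_cs_ctx_create(dev, &ctx);
    if (r)
        return r;

    /* ... build and submit command streams against ctx ... */

    return amdgpu_cs_ctx_free(ctx);
}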
/* This version of AllocateMemoryMESA allocates only GART memory, and
 * only does so after the point at which the driver has been
 * initialized.
 *
 * Theoretically a valid context isn't required.  However, in this
 * implementation, it is, as I'm using the hardware lock to protect the
 * kernel data structures, and the current context to get the device fd.
 */
void *r200AllocateMemoryMESA(__DRIscreen *screen, GLsizei size,
                             GLfloat readfreq, GLfloat writefreq,
                             GLfloat priority)
{
    GET_CURRENT_CONTEXT(ctx);
    r200ContextPtr rmesa;
    int region_offset;
    drm_radeon_mem_alloc_t alloc;
    int ret;

    if (R200_DEBUG & RADEON_IOCTL)
        fprintf(stderr, "%s sz %d %f/%f/%f\n", __FUNCTION__,
                size, readfreq, writefreq, priority);

    if (!ctx || !(rmesa = R200_CONTEXT(ctx)) ||
        !rmesa->radeon.radeonScreen->gartTextures.map)
        return NULL;

    if (getenv("R200_NO_ALLOC"))
        return NULL;

    alloc.region = RADEON_MEM_REGION_GART;
    alloc.alignment = 0;
    alloc.size = size;
    alloc.region_offset = &region_offset;

    ret = drmCommandWriteRead(rmesa->radeon.radeonScreen->driScreen->fd,
                              DRM_RADEON_ALLOC, &alloc, sizeof(alloc));
    if (ret) {
        fprintf(stderr, "%s: DRM_RADEON_ALLOC ret %d\n", __FUNCTION__, ret);
        return NULL;
    }

    {
        char *region_start =
            (char *)rmesa->radeon.radeonScreen->gartTextures.map;
        return (void *)(region_start + region_offset);
    }
}
int drm_tegra_bo_create(struct drm_tegra *drm, uint32_t flags,
                        uint32_t size, struct drm_tegra_bo **bop)
{
    struct drm_tegra_gem_create args;
    struct drm_tegra_bo *bo;
    int err;

    if (!drm || size == 0 || !bop)
        return -EINVAL;

    bo = calloc(1, sizeof(*bo));
    if (!bo)
        return -ENOMEM;

    DRMINITLISTHEAD(&bo->list);
    atomic_set(&bo->ref, 1);
    bo->flags = flags;
    bo->size = size;
    bo->drm = drm;

    memset(&args, 0, sizeof(args));
    args.flags = flags;
    args.size = size;

    err = drmCommandWriteRead(drm->fd, DRM_TEGRA_GEM_CREATE,
                              &args, sizeof(args));
    if (err < 0) {
        free(bo);
        return -errno;
    }

    DRMLISTADD(&bo->list, &drm->bo_list);
    bo->handle = args.handle;

    *bop = bo;

    return 0;
}
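/*
 * Usage sketch (hypothetical helper): allocate a small BO and mark it
 * block-linear via drm_tegra_bo_set_tiling from earlier in this section.
 * DRM_TEGRA_GEM_TILING_MODE_BLOCK is from the kernel's tegra_drm.h; the
 * value 4 (log2 block height) is illustrative.
 */
static int example_create_tiled(struct drm_tegra *drm,
                                struct drm_tegra_bo **bop)
{
    struct drm_tegra_bo_tiling tiling;
    int err;

    err = drm_tegra_bo_create(drm, 0, 4096, bop);
    if (err < 0)
        return err;

    memset(&tiling, 0, sizeof(tiling));
    tiling.mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
    tiling.value = 4;

    return drm_tegra_bo_set_tiling(*bop, &tiling);
}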
bool Drm::writeReadIoctl(unsigned long cmd, void *data, unsigned long size)
{
    int err;

    if (mDrmFd <= 0) {
        ELOGTRACE("drm is not initialized");
        return false;
    }

    if (!data || !size) {
        ELOGTRACE("invalid parameters");
        return false;
    }

    err = drmCommandWriteRead(mDrmFd, cmd, data, size);
    if (err) {
        WLOGTRACE("failed to call %lu ioctl with failure %d", cmd, err);
        return false;
    }

    return true;
}
static int mgaWaitFence(mgaContextPtr mmesa, uint32_t fence,
                        uint32_t *curr_fence)
{
    int ret = ENOSYS;

    if (mmesa->driScreen->drm_version.minor >= 2) {
        uint32_t temp = fence;

        ret = drmCommandWriteRead(mmesa->driScreen->fd,
                                  DRM_MGA_WAIT_FENCE,
                                  &temp, sizeof(uint32_t));
        if (ret) {
            fprintf(stderr, "DRM_MGA_WAIT_FENCE: %d\n", ret);
            exit(1);
        }

        if (curr_fence) {
            *curr_fence = temp;
        }
    }

    return ret;
}
/* allocate a new buffer object */
static struct omap_bo *omap_bo_new_impl(struct omap_device *dev,
                                        union omap_gem_size size,
                                        uint32_t flags)
{
    struct omap_bo *bo = calloc(sizeof(*bo), 1);
    struct drm_omap_gem_new req = {
        .size = size,
        .flags = flags,
    };

    if (size.bytes == 0) {
        goto fail;
    }

    if (!bo) {
        goto fail;
    }

    bo->dev = dev;

    if (flags & OMAP_BO_TILED) {
        bo->size = round_up(size.tiled.width, PAGE_SIZE) * size.tiled.height;
    } else {
        bo->size = size.bytes;
    }

    if (drmCommandWriteRead(dev->fd, DRM_OMAP_GEM_NEW, &req, sizeof(req))) {
        goto fail;
    }

    bo->handle = req.handle;

    return bo;

fail:
    free(bo);
    return NULL;
}
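/*
 * Usage sketch (hypothetical helper): the union omap_gem_size argument
 * carries either a byte count or a tiled width/height pair, selected by
 * OMAP_BO_TILED in flags; this fills the untiled variant. OMAP_BO_WC is
 * from the omap_drm UAPI header.
 */
static struct omap_bo *example_untiled_bo(struct omap_device *dev)
{
    union omap_gem_size size;

    size.bytes = 4096;

    return omap_bo_new_impl(dev, size, OMAP_BO_WC);
}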
int VIAAllocLinear(VIAMemPtr mem, ScrnInfoPtr pScrn, unsigned long size)
{
#ifdef XF86DRI
    VIAPtr pVia = VIAPTR(pScrn);
    int ret;

    if (mem->pool)
        ErrorF("VIA Double Alloc.\n");

    if (pVia->directRenderingEnabled) {
        mem->pScrn = pScrn;
        mem->drm_fd = pVia->drmFD;
        mem->drm.context = DRIGetContext(pScrn->pScreen);
        mem->drm.size = size;
        mem->drm.type = VIA_MEM_VIDEO;

        ret = drmCommandWriteRead(mem->drm_fd, DRM_VIA_ALLOCMEM,
                                  &mem->drm, sizeof(drm_via_mem_t));
        if (ret || (size != mem->drm.size)) {
            /* Try X Offscreen fallback before failing. */
            if (Success == viaOffScreenLinear(mem, pScrn, size))
                return Success;
            ErrorF("DRM memory allocation failed\n");
            return BadAlloc;
        }

        mem->base = mem->drm.offset;
        mem->pool = 2;
        DEBUG(ErrorF("Fulfilled via DRI at %lu\n", mem->base));
        return Success;
    }
#endif
    if (Success == viaOffScreenLinear(mem, pScrn, size))
        return Success;
    ErrorF("Linear memory allocation failed\n");
    return BadAlloc;
}
void OMXVideoDecoderAVCSecure::WaitForFrameDisplayed()
{
    if (mDrmDevFd <= 0) {
        ALOGE("Invalid mDrmDevFd");
        return;
    }

    // Wait up to 200ms until both overlay planes are disabled
    int status = 3;
    int retry = 20;

    while (retry--) {
        for (int i = 0; i < 2; i++) {
            if (status & (1 << i)) {
                struct drm_psb_register_rw_arg arg;
                memset(&arg, 0, sizeof(struct drm_psb_register_rw_arg));
                arg.get_plane_state_mask = 1;
                arg.plane.type = DC_OVERLAY_PLANE;
                arg.plane.index = i;

                int ret = drmCommandWriteRead(mDrmDevFd, DRM_PSB_REGISTER_RW,
                                              &arg, sizeof(arg));
                if (ret != 0) {
                    ALOGE("Failed to query status of overlay plane %d, ret = %d",
                          i, ret);
                    status &= ~(1 << i);
                } else if (arg.plane.ctx == PSB_DC_PLANE_DISABLED) {
                    status &= ~(1 << i);
                }
            }
        }

        if (status == 0) {
            break;
        }

        // Sleep 10ms then query again
        usleep(10000);
    }

    if (status != 0) {
        ALOGE("Overlay planes not disabled, status %d", status);
    }
}
/**
 * \brief Upload texture image.
 *
 * \param rmesa Radeon context.
 * \param t Radeon texture object.
 * \param level level of the image to take the sub-image.
 * \param x sub-image abscissa.
 * \param y sub-image ordinate.
 * \param width sub-image width.
 * \param height sub-image height.
 *
 * Fills in drmRadeonTexture and drmRadeonTexImage structures and uploads
 * the texture via the DRM_RADEON_TEXTURE ioctl, aborting in case of
 * failure.
 */
static void radeonUploadSubImage(radeonContextPtr rmesa,
                                 radeonTexObjPtr t, GLint level,
                                 GLint x, GLint y, GLint width, GLint height)
{
    struct gl_texture_image *texImage;
    GLint ret;
    drmRadeonTexture tex;
    drmRadeonTexImage tmp;

    level += t->firstLevel;
    texImage = t->tObj->Image[0][level];

    if (!texImage || !texImage->Data)
        return;

    t->image[level].data = texImage->Data;

    tex.offset = t->bufAddr;
    tex.pitch = (t->image[0].width * texImage->TexFormat->TexelBytes) / 64;
    tex.format = t->pp_txformat & RADEON_TXFORMAT_FORMAT_MASK;
    tex.width = texImage->Width;
    tex.height = texImage->Height;
    tex.image = &tmp;

    memcpy(&tmp, &t->image[level], sizeof(drmRadeonTexImage));

    do {
        ret = drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_TEXTURE,
                                  &tex, sizeof(drmRadeonTexture));
    } while (ret && errno == EAGAIN);

    if (ret) {
        UNLOCK_HARDWARE(rmesa);
        fprintf(stderr, "DRM_RADEON_TEXTURE: return = %d\n", ret);
        exit(1);
    }
}
int drm_vc4_bo_new(struct drm_vc4_bo **bop, struct drm_vc4 *drm,
                   uint32_t flags, uint32_t size)
{
    struct drm_vc4_create_bo args;
    struct drm_vc4_bo *bo;
    int err;

    if (!drm || size == 0 || !bop)
        return -EINVAL;

    bo = calloc(1, sizeof(*bo));
    if (!bo)
        return -ENOMEM;

    atomic_set(&bo->ref, 1);
    bo->flags = flags;
    bo->size = size;
    bo->drm = drm;

    memset(&args, 0, sizeof(args));
    args.flags = flags;
    args.size = size;

    err = drmCommandWriteRead(drm->fd, DRM_VC4_CREATE_BO,
                              &args, sizeof(args));
    if (err < 0) {
        err = -errno;
        free(bo);
        return err;
    }

    bo->handle = args.handle;

    *bop = bo;

    return 0;
}
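/*
 * Usage sketch (hypothetical helper): allocate a 64 KiB BO. Releasing the
 * reference is omitted because the tree's unref helper is not shown in
 * this section.
 */
static int example_vc4_alloc(struct drm_vc4 *drm, struct drm_vc4_bo **bop)
{
    int err = drm_vc4_bo_new(bop, drm, 0, 64 * 1024);

    if (err < 0)
        return err;

    /* ... use (*bop)->handle in subsequent ioctls ... */

    return 0;
}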
static void radeon_bo_set_tiling(struct pb_buffer *_buf,
                                 struct radeon_winsys_cs *rcs,
                                 enum radeon_bo_layout microtiled,
                                 enum radeon_bo_layout macrotiled,
                                 uint32_t pitch)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct drm_radeon_gem_set_tiling args = {};

    /* Tiling determines how DRM treats the buffer data.
     * We must flush CS when changing it if the buffer is referenced. */
    if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
        cs->flush_cs(cs->flush_data, 0);
    }

    while (p_atomic_read(&bo->num_active_ioctls)) {
        sched_yield();
    }

    if (microtiled == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MICRO_TILE;
    else if (microtiled == RADEON_LAYOUT_SQUARETILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MICRO_TILE_SQUARE;

    if (macrotiled == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MACRO_TILE;

    args.handle = bo->handle;
    args.pitch = pitch;

    drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_SET_TILING,
                        &args, sizeof(args));
}
static int cs_gem_emit(struct radeon_cs_int *cs)
{
    struct cs_gem *csg = (struct cs_gem *)cs;
    uint64_t chunk_array[2];
    unsigned i;
    int r;

    while (cs->cdw & 7)
        radeon_cs_write_dword((struct radeon_cs *)cs, 0x80000000);

#if CS_BOF_DUMP
    cs_gem_dump_bof(cs);
#endif

    csg->chunks[0].length_dw = cs->cdw;

    chunk_array[0] = (uint64_t)(uintptr_t)&csg->chunks[0];
    chunk_array[1] = (uint64_t)(uintptr_t)&csg->chunks[1];

    csg->cs.num_chunks = 2;
    csg->cs.chunks = (uint64_t)(uintptr_t)chunk_array;

    r = drmCommandWriteRead(cs->csm->fd, DRM_RADEON_CS,
                            &csg->cs, sizeof(struct drm_radeon_cs));
    for (i = 0; i < csg->base.crelocs; i++) {
        csg->relocs_bo[i]->space_accounted = 0;
        /* bo might be referenced from another context so have to use
         * atomic operations */
        atomic_dec((atomic_t *)radeon_gem_get_reloc_in_cs(
                       (struct radeon_bo *)csg->relocs_bo[i]), cs->id);
        radeon_bo_unref((struct radeon_bo *)csg->relocs_bo[i]);
        csg->relocs_bo[i] = NULL;
    }

    cs->csm->read_used = 0;
    cs->csm->vram_write_used = 0;
    cs->csm->gart_write_used = 0;

    return r;
}
static void radeon_drm_cs_emit_ioctl_oneshot(struct radeon_cs_context *csc)
{
    unsigned i;

    if (drmCommandWriteRead(csc->fd, DRM_RADEON_CS,
                            &csc->cs, sizeof(struct drm_radeon_cs))) {
        if (debug_get_bool_option("RADEON_DUMP_CS", FALSE)) {
            fprintf(stderr, "radeon: The kernel rejected CS, dumping...\n");

            for (i = 0; i < csc->chunks[0].length_dw; i++) {
                fprintf(stderr, "0x%08X\n", csc->buf[i]);
            }
        } else {
            fprintf(stderr, "radeon: The kernel rejected CS, "
                            "see dmesg for more information.\n");
        }
    }

    for (i = 0; i < csc->crelocs; i++)
        p_atomic_dec(&csc->relocs_bo[i]->num_active_ioctls);

    radeon_cs_context_cleanup(csc);
}