int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { struct radeon_device *rdev = dev->dev_private; struct radeon_cs_parser parser; int r; down_read(&rdev->exclusive_lock); if (!rdev->accel_working) { up_read(&rdev->exclusive_lock); return -EBUSY; } /* initialize parser */ memset(&parser, 0, sizeof(struct radeon_cs_parser)); parser.filp = filp; parser.rdev = rdev; parser.dev = rdev->dev; parser.family = rdev->family; r = radeon_cs_parser_init(&parser, data); if (r) { DRM_ERROR("Failed to initialize parser !\n"); radeon_cs_parser_fini(&parser, r, false); up_read(&rdev->exclusive_lock); r = radeon_cs_handle_lockup(rdev, r); return r; } r = radeon_cs_ib_fill(rdev, &parser); if (!r) { r = radeon_cs_parser_relocs(&parser); if (r && r != -ERESTARTSYS) DRM_ERROR("Failed to parse relocation %d!\n", r); } if (r) { radeon_cs_parser_fini(&parser, r, false); up_read(&rdev->exclusive_lock); r = radeon_cs_handle_lockup(rdev, r); return r; } trace_radeon_cs(&parser); r = radeon_cs_ib_chunk(rdev, &parser); if (r) { goto out; } r = radeon_cs_ib_vm_chunk(rdev, &parser); if (r) { goto out; } out: radeon_cs_parser_fini(&parser, r, true); up_read(&rdev->exclusive_lock); r = radeon_cs_handle_lockup(rdev, r); return r; }
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { struct radeon_device *rdev = dev->dev_private; struct radeon_cs_parser parser; struct radeon_cs_chunk *ib_chunk; int r; mutex_lock(&rdev->cs_mutex); if (rdev->gpu_lockup) { mutex_unlock(&rdev->cs_mutex); return -EINVAL; } memset(&parser, 0, sizeof(struct radeon_cs_parser)); parser.filp = filp; parser.rdev = rdev; r = radeon_cs_parser_init(&parser, data); if (r) { DRM_ERROR("Failed to initialize parser !\n"); radeon_cs_parser_fini(&parser, r); mutex_unlock(&rdev->cs_mutex); return r; } r = radeon_ib_get(rdev, &parser.ib); if (r) { DRM_ERROR("Failed to get ib !\n"); radeon_cs_parser_fini(&parser, r); mutex_unlock(&rdev->cs_mutex); return r; } r = radeon_cs_parser_relocs(&parser); if (r) { DRM_ERROR("Failed to parse relocation !\n"); radeon_cs_parser_fini(&parser, r); mutex_unlock(&rdev->cs_mutex); return r; } ib_chunk = &parser.chunks[parser.chunk_ib_idx]; parser.ib->length_dw = ib_chunk->length_dw; r = radeon_cs_parse(&parser); if (r || parser.parser_error) { DRM_ERROR("Invalid command stream !\n"); radeon_cs_parser_fini(&parser, r); mutex_unlock(&rdev->cs_mutex); return r; } r = radeon_cs_finish_pages(&parser); if (r) { DRM_ERROR("Invalid command stream !\n"); radeon_cs_parser_fini(&parser, r); mutex_unlock(&rdev->cs_mutex); return r; } r = radeon_ib_schedule(rdev, parser.ib); if (r) { DRM_ERROR("Faild to schedule IB !\n"); } radeon_cs_parser_fini(&parser, r); mutex_unlock(&rdev->cs_mutex); return r; }
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { struct radeon_device *rdev = dev->dev_private; struct radeon_cs_parser parser; struct radeon_cs_chunk *ib_chunk; int r; mutex_lock(&rdev->cs_mutex); /* initialize parser */ memset(&parser, 0, sizeof(struct radeon_cs_parser)); parser.filp = filp; parser.rdev = rdev; parser.dev = rdev->dev; parser.family = rdev->family; r = radeon_cs_parser_init(&parser, data); if (r) { DRM_ERROR("Failed to initialize parser !\n"); radeon_cs_parser_fini(&parser, r); mutex_unlock(&rdev->cs_mutex); return r; } r = radeon_ib_get(rdev, &parser.ib); if (r) { DRM_ERROR("Failed to get ib !\n"); radeon_cs_parser_fini(&parser, r); mutex_unlock(&rdev->cs_mutex); return r; } r = radeon_cs_parser_relocs(&parser); if (r) { if (r != -ERESTARTSYS) DRM_ERROR("Failed to parse relocation %d!\n", r); radeon_cs_parser_fini(&parser, r); mutex_unlock(&rdev->cs_mutex); return r; } /* Copy the packet into the IB, the parser will read from the * input memory (cached) and write to the IB (which can be * uncached). */ ib_chunk = &parser.chunks[parser.chunk_ib_idx]; parser.ib->length_dw = ib_chunk->length_dw; r = radeon_cs_parse(&parser); if (r || parser.parser_error) { DRM_ERROR("Invalid command stream !\n"); radeon_cs_parser_fini(&parser, r); mutex_unlock(&rdev->cs_mutex); return r; } r = radeon_cs_finish_pages(&parser); if (r) { DRM_ERROR("Invalid command stream !\n"); radeon_cs_parser_fini(&parser, r); mutex_unlock(&rdev->cs_mutex); return r; } r = radeon_ib_schedule(rdev, parser.ib); if (r) { DRM_ERROR("Failed to schedule IB !\n"); } radeon_cs_parser_fini(&parser, r); mutex_unlock(&rdev->cs_mutex); return r; }
/*
 * radeon_cs_ioctl - DRM_IOCTL_RADEON_CS handler (DragonFly BSD port:
 * uses lockmgr() in place of the Linux rw_semaphore exclusive_lock).
 *
 * NOTE(review): unlike the Linux down_read() variant, this takes the
 * exclusive_lock in LK_EXCLUSIVE mode — presumably because lockmgr has
 * different reader semantics; confirm against the rest of the port.
 *
 * If a GPU reset is pending (rdev->in_reset), the lock is dropped, the
 * reset is performed here, and -EAGAIN is returned so userspace retries
 * the submission.
 *
 * Returns 0 on success, -EBUSY if acceleration is disabled, -EAGAIN
 * after a successful in-line GPU reset, or a negative errno (possibly
 * rewritten by radeon_cs_handle_lockup()).
 */
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_cs_parser parser;
	int r;

	lockmgr(&rdev->exclusive_lock, LK_EXCLUSIVE);
	if (!rdev->accel_working) {
		lockmgr(&rdev->exclusive_lock, LK_RELEASE);
		return -EBUSY;
	}
	if (rdev->in_reset) {
		/* A reset is pending: drop the lock, reset the GPU ourselves,
		 * and tell the caller to retry the submission. */
		lockmgr(&rdev->exclusive_lock, LK_RELEASE);
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
		return r;
	}
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = rdev;
	parser.dev = rdev->dev;
	parser.family = rdev->family;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		radeon_cs_parser_fini(&parser, r, false);
		lockmgr(&rdev->exclusive_lock, LK_RELEASE);
		/* Translate a lockup detected during submission into the
		 * appropriate userspace-visible error. */
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}
	r = radeon_cs_ib_fill(rdev, &parser);
	if (!r) {
		r = radeon_cs_parser_relocs(&parser);
		/* -ERESTARTSYS means the call will be retried; stay quiet. */
		if (r && r != -ERESTARTSYS)
			DRM_ERROR("Failed to parse relocation %d!\n", r);
	}
	if (r) {
		radeon_cs_parser_fini(&parser, r, false);
		lockmgr(&rdev->exclusive_lock, LK_RELEASE);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}
#if TRACE_TODO
	/* Tracepoint not yet ported to DragonFly. */
	trace_radeon_cs(&parser);
#endif
	r = radeon_cs_ib_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
	r = radeon_cs_ib_vm_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
out:
	/* Common teardown: release parser resources (true = backoff),
	 * drop the lock, and report any lockup. */
	radeon_cs_parser_fini(&parser, r, true);
	lockmgr(&rdev->exclusive_lock, LK_RELEASE);
	r = radeon_cs_handle_lockup(rdev, r);
	return r;
}