static int kfd_ioctl_set_scratch_backing_va(struct file *filep, struct kfd_process *p, void *data) { struct kfd_ioctl_set_scratch_backing_va_args *args = data; struct kfd_process_device *pdd; struct kfd_dev *dev; long err; dev = kfd_device_by_id(args->gpu_id); if (!dev) return -EINVAL; mutex_lock(&p->mutex); pdd = kfd_bind_process_to_device(dev, p); if (IS_ERR(pdd)) { err = PTR_ERR(pdd); goto bind_process_to_device_fail; } pdd->qpd.sh_hidden_private_base = args->va_addr; mutex_unlock(&p->mutex); if (sched_policy == KFD_SCHED_POLICY_NO_HWS && pdd->qpd.vmid != 0) dev->kfd2kgd->set_scratch_backing_va( dev->kgd, args->va_addr, pdd->qpd.vmid); return 0; bind_process_to_device_fail: mutex_unlock(&p->mutex); return err; }
static int kfd_ioctl_set_trap_handler(struct file *filep, struct kfd_process *p, void *data) { struct kfd_ioctl_set_trap_handler_args *args = data; struct kfd_dev *dev; int err = 0; struct kfd_process_device *pdd; dev = kfd_device_by_id(args->gpu_id); if (dev == NULL) return -EINVAL; mutex_lock(&p->mutex); pdd = kfd_bind_process_to_device(dev, p); if (IS_ERR(pdd)) { err = -ESRCH; goto out; } if (dev->dqm->ops.set_trap_handler(dev->dqm, &pdd->qpd, args->tba_addr, args->tma_addr)) err = -EINVAL; out: mutex_unlock(&p->mutex); return err; }
/*
 * Set the default and alternate cache policies for this process on one GPU
 * (copy_from_user variant of the ioctl).  Both requested policies must be
 * COHERENT or NONCOHERENT; anything else is rejected before touching the
 * device.
 */
static long kfd_ioctl_set_memory_policy(struct file *filep,
					struct kfd_process *p,
					void __user *arg)
{
	struct kfd_ioctl_set_memory_policy_args args;
	enum cache_policy default_policy, alternate_policy;
	struct kfd_process_device *pdd;
	struct kfd_dev *dev;
	int err = 0;

	if (copy_from_user(&args, arg, sizeof(args)))
		return -EFAULT;

	/* Validate both policies before taking any locks */
	if (args.default_policy != KFD_IOC_CACHE_POLICY_COHERENT &&
	    args.default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT)
		return -EINVAL;

	if (args.alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT &&
	    args.alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT)
		return -EINVAL;

	dev = kfd_device_by_id(args.gpu_id);
	if (!dev)
		return -EINVAL;

	mutex_lock(&p->mutex);

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = PTR_ERR(pdd);
		goto unlock;
	}

	if (args.default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
		default_policy = cache_policy_coherent;
	else
		default_policy = cache_policy_noncoherent;

	if (args.alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
		alternate_policy = cache_policy_coherent;
	else
		alternate_policy = cache_policy_noncoherent;

	/* DQM callback returns false on failure */
	if (!dev->dqm->set_cache_memory_policy(dev->dqm,
				&pdd->qpd,
				default_policy,
				alternate_policy,
				(void __user *)args.alternate_aperture_base,
				args.alternate_aperture_size))
		err = -EINVAL;

unlock:
	mutex_unlock(&p->mutex);

	return err;
}
static int kfd_ioctl_dbg_register(struct file *filep, struct kfd_process *p, void *data) { struct kfd_ioctl_dbg_register_args *args = data; struct kfd_dev *dev; struct kfd_dbgmgr *dbgmgr_ptr; struct kfd_process_device *pdd; bool create_ok; long status = 0; dev = kfd_device_by_id(args->gpu_id); if (dev == NULL) return -EINVAL; if (dev->device_info->asic_family == CHIP_CARRIZO) { pr_debug("kfd_ioctl_dbg_register not supported on CZ\n"); return -EINVAL; } mutex_lock(kfd_get_dbgmgr_mutex()); mutex_lock(&p->mutex); /* * make sure that we have pdd, if this the first queue created for * this process */ pdd = kfd_bind_process_to_device(dev, p); if (IS_ERR(pdd)) { mutex_unlock(&p->mutex); mutex_unlock(kfd_get_dbgmgr_mutex()); return PTR_ERR(pdd); } if (dev->dbgmgr == NULL) { /* In case of a legal call, we have no dbgmgr yet */ create_ok = kfd_dbgmgr_create(&dbgmgr_ptr, dev); if (create_ok) { status = kfd_dbgmgr_register(dbgmgr_ptr, p); if (status != 0) kfd_dbgmgr_destroy(dbgmgr_ptr); else dev->dbgmgr = dbgmgr_ptr; } } else { pr_debug("debugger already registered\n"); status = -EINVAL; } mutex_unlock(&p->mutex); mutex_unlock(kfd_get_dbgmgr_mutex()); return status; }
static int kfd_ioctl_set_memory_policy(struct file *filep, struct kfd_process *p, void *data) { struct kfd_ioctl_set_memory_policy_args *args = data; struct kfd_dev *dev; int err = 0; struct kfd_process_device *pdd; enum cache_policy default_policy, alternate_policy; if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { return -EINVAL; } if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { return -EINVAL; } dev = kfd_device_by_id(args->gpu_id); if (!dev) return -EINVAL; mutex_lock(&p->mutex); pdd = kfd_bind_process_to_device(dev, p); if (IS_ERR(pdd)) { err = -ESRCH; goto out; } default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT) ? cache_policy_coherent : cache_policy_noncoherent; alternate_policy = (args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT) ? cache_policy_coherent : cache_policy_noncoherent; if (!dev->dqm->ops.set_cache_memory_policy(dev->dqm, &pdd->qpd, default_policy, alternate_policy, (void __user *)args->alternate_aperture_base, args->alternate_aperture_size)) err = -EINVAL; out: mutex_unlock(&p->mutex); return err; }
static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, void *data) { struct kfd_ioctl_create_queue_args *args = data; struct kfd_dev *dev; int err = 0; unsigned int queue_id; struct kfd_process_device *pdd; struct queue_properties q_properties; memset(&q_properties, 0, sizeof(struct queue_properties)); pr_debug("kfd: creating queue ioctl\n"); err = set_queue_properties_from_user(&q_properties, args); if (err) return err; pr_debug("kfd: looking for gpu id 0x%x\n", args->gpu_id); dev = kfd_device_by_id(args->gpu_id); if (dev == NULL) { pr_debug("kfd: gpu id 0x%x was not found\n", args->gpu_id); return -EINVAL; } mutex_lock(&p->mutex); pdd = kfd_bind_process_to_device(dev, p); if (IS_ERR(pdd)) { err = -ESRCH; goto err_bind_process; } pr_debug("kfd: creating queue for PASID %d on GPU 0x%x\n", p->pasid, dev->id); err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, 0, q_properties.type, &queue_id); if (err != 0) goto err_create_queue; args->queue_id = queue_id; /* Return gpu_id as doorbell offset for mmap usage */ args->doorbell_offset = (KFD_MMAP_DOORBELL_MASK | args->gpu_id); args->doorbell_offset <<= PAGE_SHIFT; mutex_unlock(&p->mutex); pr_debug("kfd: queue id %d was created successfully\n", args->queue_id); pr_debug("ring buffer address == 0x%016llX\n", args->ring_base_address); pr_debug("read ptr address == 0x%016llX\n", args->read_pointer_address); pr_debug("write ptr address == 0x%016llX\n", args->write_pointer_address); return 0; err_create_queue: err_bind_process: mutex_unlock(&p->mutex); return err; }
/*
 * Create a compute queue for this process on the requested GPU
 * (copy_from_user variant: args live in user space and the updated
 * struct is copied back on success).
 *
 * Returns 0 on success; -EFAULT on user-copy failure, -EINVAL for a bad
 * gpu_id or rejected queue properties, or the error from binding the
 * process / creating the queue.
 */
static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
					void __user *arg)
{
	struct kfd_ioctl_create_queue_args args;
	struct kfd_dev *dev;
	int err = 0;
	unsigned int queue_id;
	struct kfd_process_device *pdd;
	struct queue_properties q_properties;

	memset(&q_properties, 0, sizeof(struct queue_properties));

	if (copy_from_user(&args, arg, sizeof(args)))
		return -EFAULT;

	pr_debug("kfd: creating queue ioctl\n");

	/* Validate and translate the user-supplied queue parameters */
	err = set_queue_properties_from_user(&q_properties, &args);
	if (err)
		return err;

	dev = kfd_device_by_id(args.gpu_id);
	if (dev == NULL)
		return -EINVAL;

	mutex_lock(&p->mutex);

	/* Ensure a per-device data struct exists (first queue may create it) */
	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = PTR_ERR(pdd);
		goto err_bind_process;
	}

	pr_debug("kfd: creating queue for PASID %d on GPU 0x%x\n",
			p->pasid, dev->id);

	/* This variant always creates a COMPUTE-type queue */
	err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, 0,
				KFD_QUEUE_TYPE_COMPUTE, &queue_id);
	if (err != 0)
		goto err_create_queue;

	args.queue_id = queue_id;

	/* Return gpu_id as doorbell offset for mmap usage */
	args.doorbell_offset = args.gpu_id << PAGE_SHIFT;

	if (copy_to_user(arg, &args, sizeof(args))) {
		err = -EFAULT;
		goto err_copy_args_out;
	}

	mutex_unlock(&p->mutex);

	pr_debug("kfd: queue id %d was created successfully\n", args.queue_id);
	pr_debug("ring buffer address == 0x%016llX\n", args.ring_base_address);
	pr_debug("read ptr address == 0x%016llX\n", args.read_pointer_address);
	pr_debug("write ptr address == 0x%016llX\n", args.write_pointer_address);

	return 0;

	/*
	 * If user space never learns the queue_id (copy_to_user failed),
	 * the queue must be torn down here or it would leak.
	 */
err_copy_args_out:
	pqm_destroy_queue(&p->pqm, queue_id);
	/* Labels fall through intentionally: both paths only need unlock */
err_create_queue:
err_bind_process:
	mutex_unlock(&p->mutex);
	return err;
}