/*
 * Handle the CXL_IOCTL_START_WORK ioctl: validate the user-supplied work
 * descriptor, allocate per-process interrupts, take PID references for the
 * attaching task, and attach the context to the AFU.
 *
 * Returns 0 on success, or a negative errno:
 *   -EFAULT  could not copy the work structure from userspace
 *   -EIO     context is not in the OPENED state
 *   -EINVAL  reserved fields / unknown flags set, or irq count out of range
 *   (plus whatever afu_register_irqs()/cxl_adapter_context_get()/
 *    attach_process() return on failure)
 */
static long afu_ioctl_start_work(struct cxl_context *ctx,
				 struct cxl_ioctl_start_work __user *uwork)
{
	struct cxl_ioctl_start_work work;
	u64 amr = 0;
	int rc;

	pr_devel("%s: pe: %i\n", __func__, ctx->pe);

	/*
	 * Do this outside the status_mutex to avoid a circular dependency with
	 * the locking in cxl_mmap_fault().
	 *
	 * Bug fix: return directly on failure. The original jumped to "out",
	 * which unlocks status_mutex — but the mutex has not been taken yet at
	 * this point, so that path unlocked an unheld mutex.
	 */
	if (copy_from_user(&work, uwork, sizeof(struct cxl_ioctl_start_work)))
		return -EFAULT;

	mutex_lock(&ctx->status_mutex);
	if (ctx->status != OPENED) {
		rc = -EIO;
		goto out;
	}

	/*
	 * If any of the reserved fields are set or any of the unused
	 * flags are set it's invalid.
	 */
	if (work.reserved1 || work.reserved2 || work.reserved3 ||
	    work.reserved4 || work.reserved5 || work.reserved6 ||
	    (work.flags & ~CXL_START_WORK_ALL)) {
		rc = -EINVAL;
		goto out;
	}

	/* Default to the per-process irq count; otherwise range-check it. */
	if (!(work.flags & CXL_START_WORK_NUM_IRQS))
		work.num_interrupts = ctx->afu->pp_irqs;
	else if ((work.num_interrupts < ctx->afu->pp_irqs) ||
		 (work.num_interrupts > ctx->afu->irqs_max)) {
		rc = -EINVAL;
		goto out;
	}

	if ((rc = afu_register_irqs(ctx, work.num_interrupts)))
		goto out;

	/* Only honour the AMR bits the thread is allowed to set (UAMOR mask). */
	if (work.flags & CXL_START_WORK_AMR)
		amr = work.amr & mfspr(SPRN_UAMOR);

	ctx->mmio_err_ff = !!(work.flags & CXL_START_WORK_ERR_FF);

	/*
	 * Increment the mapped context count for adapter. This also checks
	 * if adapter_context_lock is taken.
	 */
	rc = cxl_adapter_context_get(ctx->afu->adapter);
	if (rc) {
		afu_release_irqs(ctx, ctx);
		goto out;
	}

	/*
	 * We grab the PID here and not in the file open to allow for the case
	 * where a process (master, some daemon, etc) has opened the chardev on
	 * behalf of another process, so the AFU's mm gets bound to the process
	 * that performs this ioctl and not the process that opened the file.
	 * Also we grab the PID of the group leader so that if the task that
	 * has performed the attach operation exits the mm context of the
	 * process is still accessible.
	 */
	ctx->pid = get_task_pid(current, PIDTYPE_PID);
	ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID);

	trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts,
			 amr);

	if ((rc = cxl_ops->attach_process(ctx, false,
					  work.work_element_descriptor, amr))) {
		/* Unwind in reverse order of acquisition. */
		afu_release_irqs(ctx, ctx);
		cxl_adapter_context_put(ctx->afu->adapter);
		put_pid(ctx->glpid);
		put_pid(ctx->pid);
		ctx->glpid = ctx->pid = NULL;
		goto out;
	}

	ctx->status = STARTED;
	rc = 0;
out:
	mutex_unlock(&ctx->status_mutex);
	return rc;
}
/*
 * Start a context on behalf of an in-kernel user (or a user task when @task
 * is non-NULL). Code here similar to afu_ioctl_start_work().
 *
 * @ctx:  context to start (must not already be STARTED)
 * @wed:  work element descriptor passed through to attach_process()
 * @task: task to bind the context to, or NULL for a pure kernel context
 *
 * Returns 0 on success (or if already started), negative errno on failure.
 */
int cxl_start_context(struct cxl_context *ctx, u64 wed, struct task_struct *task)
{
	int rc = 0;
	bool kernel = true;	/* no task => kernel context */

	pr_devel("%s: pe: %i\n", __func__, ctx->pe);

	mutex_lock(&ctx->status_mutex);
	if (ctx->status == STARTED)
		goto out; /* already started */

	/*
	 * Increment the mapped context count for adapter. This also checks
	 * if adapter_context_lock is taken.
	 */
	rc = cxl_adapter_context_get(ctx->afu->adapter);
	if (rc)
		goto out;

	if (task) {
		ctx->pid = get_task_pid(task, PIDTYPE_PID);
		kernel = false;

		/*
		 * acquire a reference to the task's mm
		 *
		 * NOTE(review): the pid reference above comes from @task but
		 * the mm is taken from current — presumably callers always
		 * pass current as @task; confirm against callers before
		 * changing either side.
		 */
		ctx->mm = get_task_mm(current);

		/* ensure this mm_struct can't be freed */
		cxl_context_mm_count_get(ctx);

		if (ctx->mm) {
			/* decrement the use count from above */
			mmput(ctx->mm);
			/* make TLBIs for this context global */
			mm_context_add_copro(ctx->mm);
		}
	}

	/*
	 * Increment driver use count. Enables global TLBIs for hash
	 * and callbacks to handle the segment table
	 */
	cxl_ctx_get();

	/*
	 * See the comment in afu_ioctl_start_work()
	 * (NOTE(review): the referenced smp_mb() comment is not visible in
	 * this chunk of the file — verify the cross-reference still holds.)
	 */
	smp_mb();

	if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
		/* Unwind everything acquired above, in reverse order. */
		put_pid(ctx->pid);
		ctx->pid = NULL;
		cxl_adapter_context_put(ctx->afu->adapter);
		cxl_ctx_put();
		if (task) {
			cxl_context_mm_count_put(ctx);
			if (ctx->mm)
				mm_context_remove_copro(ctx->mm);
		}
		goto out;
	}

	ctx->status = STARTED;
out:
	mutex_unlock(&ctx->status_mutex);
	return rc;
}