static int gk20a_as_ioctl_bind_channel(
		struct gk20a_as_share *as_share,
		struct nvhost_as_bind_channel_args *args)
{
	int err = 0;
	struct channel_gk20a *ch;

	gk20a_dbg_fn("");

	ch = gk20a_get_channel_from_file(args->channel_fd);
	if (!ch || gk20a_channel_as_bound(ch))
		return -EINVAL;

	atomic_inc(&as_share->ref_cnt);

	/* this will set channel_gk20a->vm */
	err = gk20a_vm_bind_channel(as_share, ch);
	if (err) {
		atomic_dec(&as_share->ref_cnt);
		return err;
	}

	return err;
}
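/*
 * Illustrative sketch (not part of the nvgpu source): the same
 * take-a-reference-then-undo-on-failure pattern as above, written with plain
 * C11 atomics so it compiles standalone.  struct as_share and the bind
 * callback are hypothetical stand-ins for gk20a_as_share and
 * gk20a_vm_bind_channel().
 */
#include <stdatomic.h>

struct as_share {
	atomic_int ref_cnt;	/* stands in for gk20a_as_share->ref_cnt */
};

static int bind_with_ref(struct as_share *share,
			 int (*bind)(struct as_share *share))
{
	int err;

	atomic_fetch_add(&share->ref_cnt, 1);	/* take the ref before binding */
	err = bind(share);			/* may fail, e.g. with -ENOMEM */
	if (err)
		atomic_fetch_sub(&share->ref_cnt, 1);	/* drop it again on failure */

	return err;
}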
static int vgpu_gr_alloc_obj_ctx(struct channel_gk20a *c,
				struct nvgpu_alloc_obj_ctx_args *args)
{
	struct gk20a *g = c->g;
	struct fifo_gk20a *f = &g->fifo;
	struct channel_ctx_gk20a *ch_ctx = &c->ch_ctx;
	struct tsg_gk20a *tsg = NULL;
	int err = 0;

	gk20a_dbg_fn("");

	/* an address space needs to have been bound at this point. */
	if (!gk20a_channel_as_bound(c)) {
		gk20a_err(dev_from_gk20a(g),
			"not bound to address space at time of grctx allocation");
		return -EINVAL;
	}

	if (!g->ops.gr.is_valid_class(g, args->class_num)) {
		gk20a_err(dev_from_gk20a(g),
			"invalid obj class 0x%x", args->class_num);
		err = -EINVAL;
		goto out;
	}
	c->obj_class = args->class_num;

	/* FIXME: add TSG support */
	if (gk20a_is_channel_marked_as_tsg(c))
		tsg = &f->tsg[c->tsgid];

	/* allocate gr ctx buffer */
	if (!ch_ctx->gr_ctx) {
		err = vgpu_gr_alloc_channel_gr_ctx(g, c);
		if (err) {
			gk20a_err(dev_from_gk20a(g),
				"fail to allocate gr ctx buffer");
			goto out;
		}
	} else {
		/* TBD: needs to be more subtle about which is
		 * being allocated as some are allowed to be
		 * allocated along same channel */
		gk20a_err(dev_from_gk20a(g),
			"too many classes alloc'd on same channel");
		err = -EINVAL;
		goto out;
	}

	/* commit gr ctx buffer */
	err = vgpu_gr_commit_inst(c, ch_ctx->gr_ctx->mem.gpu_va);
	if (err) {
		gk20a_err(dev_from_gk20a(g), "fail to commit gr ctx buffer");
		goto out;
	}

	/* allocate patch buffer */
	if (ch_ctx->patch_ctx.mem.pages == NULL) {
		err = vgpu_gr_alloc_channel_patch_ctx(g, c);
		if (err) {
			gk20a_err(dev_from_gk20a(g),
				"fail to allocate patch buffer");
			goto out;
		}
	}

	/* map global buffer to channel gpu_va and commit */
	if (!ch_ctx->global_ctx_buffer_mapped) {
		err = vgpu_gr_map_global_ctx_buffers(g, c);
		if (err) {
			gk20a_err(dev_from_gk20a(g),
				"fail to map global ctx buffer");
			goto out;
		}
		gr_gk20a_elpg_protected_call(g,
			vgpu_gr_commit_global_ctx_buffers(g, c, true));
	}

	/* load golden image */
	if (!c->first_init) {
		err = gr_gk20a_elpg_protected_call(g,
			vgpu_gr_load_golden_ctx_image(g, c));
		if (err) {
			gk20a_err(dev_from_gk20a(g),
				"fail to load golden ctx image");
			goto out;
		}
		c->first_init = true;
	}

	c->num_objects++;

	gk20a_dbg_fn("done");
	return 0;
out:
	/* 1. gr_ctx, patch_ctx and global ctx buffer mappings
	      can be reused, so no need to release them.
	   2. golden image load is a one-time thing, so if it
	      passed, no need to undo it. */
	gk20a_err(dev_from_gk20a(g), "fail");
	return err;
}
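/*
 * Illustrative sketch (not part of the nvgpu source): why the "out:" path in
 * vgpu_gr_alloc_obj_ctx() frees nothing.  Each buffer is allocated only if it
 * is still missing, so a failed call can simply return and a retry resumes
 * from the first step that has not completed yet.  struct obj_ctx and
 * obj_ctx_setup() are hypothetical names, not nvgpu APIs.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>

struct obj_ctx {
	void *gr_ctx;		/* allocated at most once, kept across retries */
	void *patch_ctx;	/* same allocate-once rule */
	bool golden_loaded;	/* one-time step, never undone */
};

static int obj_ctx_setup(struct obj_ctx *ctx)
{
	if (!ctx->gr_ctx) {
		ctx->gr_ctx = malloc(64);
		if (!ctx->gr_ctx)
			return -ENOMEM;	/* nothing to unwind; a retry re-allocates */
	}

	if (!ctx->patch_ctx) {
		ctx->patch_ctx = malloc(64);
		if (!ctx->patch_ctx)
			return -ENOMEM;	/* gr_ctx is deliberately kept for reuse */
	}

	if (!ctx->golden_loaded)
		ctx->golden_loaded = true;

	return 0;
}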