/* IOCTL handler: read a 3D engine register via the channel's read3dreg hook */
static int nvhost_ioctl_channel_read_3d_reg(struct nvhost_channel_userctx *ctx,
	struct nvhost_read_3d_reg_args *args)
{
	BUG_ON(!channel_op(ctx->ch).read3dreg);
	return channel_op(ctx->ch).read3dreg(ctx->ch, ctx->hwctx,
			args->offset, &args->value);
}
/* Non-blocking receive of an unsigned long from the channel */
unsigned long acl_channel_recvul_nb(ACL_CHANNEL *c)
{
	unsigned long val;

	channel_op(c, CHANRCV, &val, 0);
	return val;
}

/* Non-blocking receive of a pointer from the channel */
void *acl_channel_recvp_nb(ACL_CHANNEL *c)
{
	void *v;

	channel_op(c, CHANRCV, (void *) &v, 0);
	return v;
}
/* Push a DISCONNECT op down the channel stack, then drop the channel ref */
static void finish_shutdown_channel(void *cd, int success) {
  channel_data *chand = cd;
  grpc_channel_op op;
  op.type = GRPC_CHANNEL_DISCONNECT;
  op.dir = GRPC_CALL_DOWN;
  channel_op(grpc_channel_stack_element(
                 grpc_channel_get_channel_stack(chand->channel), 0),
             NULL, &op);
  grpc_channel_internal_unref(chand->channel);
}
/* Optionally send GOAWAY and/or DISCONNECT down the stack, then clean up */
static void finish_shutdown_channel(void *p, int success) {
  shutdown_channel_args *sca = p;
  grpc_channel_op op;
  if (sca->send_goaway) {
    op.type = GRPC_CHANNEL_GOAWAY;
    op.dir = GRPC_CALL_DOWN;
    op.data.goaway.status = GRPC_STATUS_OK;
    op.data.goaway.message = gpr_slice_from_copied_string("Server shutdown");
    channel_op(grpc_channel_stack_element(
                   grpc_channel_get_channel_stack(sca->chand->channel), 0),
               NULL, &op);
  }
  if (sca->send_disconnect) {
    op.type = GRPC_CHANNEL_DISCONNECT;
    op.dir = GRPC_CALL_DOWN;
    channel_op(grpc_channel_stack_element(
                   grpc_channel_get_channel_stack(sca->chand->channel), 0),
               NULL, &op);
  }
  GRPC_CHANNEL_INTERNAL_UNREF(sca->chand->channel, "shutdown");
  gpr_free(sca);
}
/* Memory allocation for all supported channels */
int nvhost_alloc_channels(struct nvhost_master *host)
{
	struct nvhost_channel *ch;
	int index, err;

	host->chlist = kzalloc(nvhost_channel_nb_channels(host) *
			       sizeof(struct nvhost_channel *), GFP_KERNEL);
	if (host->chlist == NULL)
		return -ENOMEM;

	mutex_init(&host->chlist_mutex);

	for (index = 0; index < nvhost_channel_nb_channels(host); index++) {
		ch = kzalloc(sizeof(*ch), GFP_KERNEL);
		if (!ch) {
			dev_err(&host->dev->dev, "failed to alloc channels\n");
			return -ENOMEM;
		}

		/* initialize data structures */
		nvhost_set_chanops(ch);
		mutex_init(&ch->submitlock);
		mutex_init(&ch->syncpts_lock);
		ch->chid = nvhost_channel_get_id_from_index(host, index);

		/* initialize channel cdma */
		err = nvhost_cdma_init(host->dev, &ch->cdma);
		if (err) {
			dev_err(&host->dev->dev, "failed to initialize cdma\n");
			return err;
		}

		/* initialize hw specifics */
		err = channel_op(ch).init(ch, host);
		if (err < 0) {
			dev_err(&host->dev->dev,
				"failed to init channel %d\n", ch->chid);
			return err;
		}

		/* store the channel */
		host->chlist[index] = ch;
	}

	return 0;
}
/* Submit a job through the channel's hardware-specific submit hook */
int nvhost_channel_submit(struct nvhost_job *job)
{
	return channel_op(job->ch).submit(job);
}

/* Initialize the gather filter, if this channel implements it */
void nvhost_channel_init_gather_filter(struct nvhost_channel *ch)
{
	if (channel_op(ch).init_gather_filter)
		channel_op(ch).init_gather_filter(ch);
}
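/*
 * A minimal sketch of the dispatch pattern the nvhost wrappers above rely
 * on: each channel carries an ops table, filled in by nvhost_set_chanops(),
 * and channel_op() selects it. The struct layout and macro below are
 * illustrative assumptions, not the driver's actual definitions.
 */
struct nvhost_hwctx;

struct nvhost_channel_ops {
	int (*init)(struct nvhost_channel *ch, struct nvhost_master *host);
	int (*submit)(struct nvhost_job *job);
	void (*init_gather_filter)(struct nvhost_channel *ch);
	void (*set_low_ch_prio)(struct nvhost_channel *ch);
	int (*read3dreg)(struct nvhost_channel *ch, struct nvhost_hwctx *hwctx,
			 u32 offset, u32 *value);
};

#define channel_op(ch) ((ch)->ops)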
/* Unmap channel from device and free all resources, deinit device */
static void nvhost_channel_unmap_locked(struct kref *ref)
{
	struct nvhost_channel *ch = container_of(ref, struct nvhost_channel,
						 refcount);
	struct nvhost_device_data *pdata;
	struct nvhost_master *host;
	int i = 0;
	int index;

	if (!ch->dev) {
		pr_err("%s: freeing unmapped channel\n", __func__);
		return;
	}

	pdata = platform_get_drvdata(ch->dev);
	host = nvhost_get_host(pdata->pdev);

	/* turn off channel cdma */
	channel_cdma_op().stop(&ch->cdma);

	if (channel_op(ch).set_low_ch_prio)
		channel_op(ch).set_low_ch_prio(ch);

	/* log this event */
	dev_dbg(&ch->dev->dev, "channel %d un-mapped\n", ch->chid);
	trace_nvhost_channel_unmap_locked(pdata->pdev->name, ch->chid,
					  pdata->num_mapped_chs);

	/* Release channel syncpoints */
	for (i = 0; i < NVHOST_MODULE_MAX_SYNCPTS; ++i) {
		/* skip over unused syncpoints */
		if (!ch->syncpts[i])
			continue;

		/* first, mark syncpoint as unused by hardware */
		nvhost_syncpt_mark_unused(&host->syncpt, ch->syncpts[i]);

		/* drop the syncpoint reference if we allocate syncpoints
		 * per channel */
		if (pdata->resource_policy == RESOURCE_PER_DEVICE)
			nvhost_syncpt_put_ref(&host->syncpt, ch->syncpts[i]);

		/* finally, clear information from channel bookkeeping */
		ch->syncpts[i] = 0;
	}

	if (ch->client_managed_syncpt) {
		/* mark syncpoint as unused */
		nvhost_syncpt_mark_unused(&host->syncpt,
					  ch->client_managed_syncpt);
		/* release it */
		if (pdata->resource_policy == RESOURCE_PER_DEVICE)
			nvhost_syncpt_put_ref(&host->syncpt,
					      ch->client_managed_syncpt);
		/* ..and handle bookkeeping */
		ch->client_managed_syncpt = 0;
	}

	/* drop reference to the vm */
	nvhost_vm_put(ch->vm);

	mutex_lock(&host->chlist_mutex);
	index = nvhost_channel_get_index_from_id(host, ch->chid);
	clear_bit(index, host->allocated_channels);
	ch->dev = NULL;
	ch->identifier = NULL;
	mutex_unlock(&host->chlist_mutex);
}
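/*
 * Illustrative caller for the release function above, assuming the usual
 * kref pattern: every holder drops its reference with kref_put(), and
 * nvhost_channel_unmap_locked() runs only when the last reference goes
 * away. The helper name below is hypothetical.
 */
#include <linux/kref.h>

static void nvhost_channel_put(struct nvhost_channel *ch)
{
	kref_put(&ch->refcount, nvhost_channel_unmap_locked);
}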
/* Non-blocking send of an unsigned long */
int acl_channel_sendul_nb(ACL_CHANNEL *c, unsigned long val)
{
	return channel_op(c, CHANSND, &val, 0);
}

/* Non-blocking send of a pointer */
int acl_channel_sendp_nb(ACL_CHANNEL *c, void *v)
{
	return channel_op(c, CHANSND, (void *) &v, 0);
}

/* Non-blocking receive into caller-supplied storage */
int acl_channel_recv_nb(ACL_CHANNEL *c, void *v)
{
	return channel_op(c, CHANRCV, v, 0);
}

/* Blocking send: waits until the value can be delivered */
int acl_channel_send(ACL_CHANNEL *c, void *v)
{
	return channel_op(c, CHANSND, v, 1);
}
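/*
 * Usage sketch for the ACL_CHANNEL wrappers above: queue a pointer with a
 * non-blocking send, then fetch it back with a non-blocking receive. The
 * acl_channel_create()/acl_channel_free() calls are assumptions about the
 * surrounding API, included only to make the sketch self-contained.
 */
#include <stdio.h>

int main(void)
{
	ACL_CHANNEL *c = acl_channel_create(sizeof(void *), 16);	/* hypothetical ctor */
	char *msg = "hello";
	void *got;

	acl_channel_sendp_nb(c, msg);		/* returns immediately */
	got = acl_channel_recvp_nb(c);		/* pops the queued pointer */
	printf("%s\n", (char *) got);

	acl_channel_free(c);			/* hypothetical dtor */
	return 0;
}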