void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call,
                                    grpc_completion_queue *cq) {
  GPR_ASSERT(cq);
  call->cq = cq;
  GRPC_CQ_INTERNAL_REF(cq, "bind");
  grpc_call_stack_set_pollset(exec_ctx, CALL_STACK_FROM_CALL(call),
                              grpc_cq_pollset(cq));
}
void grpc_server_register_completion_queue(grpc_server *server,
                                           grpc_completion_queue *cq) {
  size_t i, n;
  for (i = 0; i < server->cq_count; i++) {
    if (server->cqs[i] == cq) return;
  }
  GRPC_CQ_INTERNAL_REF(cq, "server");
  grpc_cq_mark_server_cq(cq);
  n = server->cq_count++;
  server->cqs = gpr_realloc(
      server->cqs, server->cq_count * sizeof(grpc_completion_queue *));
  server->cqs[n] = cq;
}
void grpc_alarm_set(grpc_alarm *alarm, grpc_completion_queue *cq,
                    gpr_timespec deadline, void *tag, void *reserved) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  GRPC_CQ_INTERNAL_REF(cq, "alarm");
  alarm->cq = cq;
  alarm->tag = tag;
  GPR_ASSERT(grpc_cq_begin_op(cq, tag));
  grpc_timer_init(&exec_ctx, &alarm->alarm,
                  gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
                  &alarm->on_alarm, gpr_now(GPR_CLOCK_MONOTONIC));
  grpc_exec_ctx_finish(&exec_ctx);
}
grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
                                      gpr_timespec deadline, void *reserved) {
  grpc_event ret;
  grpc_pollset_worker worker;
  int first_loop = 1;
  gpr_timespec now;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  GPR_ASSERT(!reserved);

  deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);

  GRPC_CQ_INTERNAL_REF(cc, "next");
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  for (;;) {
    if (cc->completed_tail != &cc->completed_head) {
      grpc_cq_completion *c = (grpc_cq_completion *)cc->completed_head.next;
      /* the low bit of the intrusive next pointer carries the success flag */
      cc->completed_head.next = c->next & ~(gpr_uintptr)1;
      if (c == cc->completed_tail) {
        cc->completed_tail = &cc->completed_head;
      }
      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
      ret.type = GRPC_OP_COMPLETE;
      ret.success = c->next & 1u;
      ret.tag = c->tag;
      c->done(&exec_ctx, c->done_arg, c);
      break;
    }
    if (cc->shutdown) {
      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
      memset(&ret, 0, sizeof(ret));
      ret.type = GRPC_QUEUE_SHUTDOWN;
      break;
    }
    now = gpr_now(GPR_CLOCK_MONOTONIC);
    if (!first_loop && gpr_time_cmp(now, deadline) >= 0) {
      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
      memset(&ret, 0, sizeof(ret));
      ret.type = GRPC_QUEUE_TIMEOUT;
      break;
    }
    first_loop = 0;
    grpc_pollset_work(&exec_ctx, &cc->pollset, &worker, now, deadline);
  }
  GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
  GRPC_CQ_INTERNAL_UNREF(cc, "next");
  grpc_exec_ctx_finish(&exec_ctx);
  return ret;
}
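/* Usage sketch (not part of the source above): a minimal event loop built on
   the public three-argument grpc_completion_queue_next shown here. This is a
   hedged illustration; handle_event() is a hypothetical application callback
   and the 200ms poll interval is arbitrary. */
#include <grpc/grpc.h>
#include <grpc/support/time.h>

static void handle_event(grpc_event ev) {
  /* hypothetical: application-specific dispatch keyed on ev.tag */
  (void)ev;
}

static void poll_loop(grpc_completion_queue *cq) {
  for (;;) {
    /* block for up to 200ms waiting for the next completed operation */
    grpc_event ev = grpc_completion_queue_next(
        cq,
        gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                     gpr_time_from_millis(200, GPR_TIMESPAN)),
        NULL);
    if (ev.type == GRPC_QUEUE_SHUTDOWN) break;   /* queue drained and shut down */
    if (ev.type == GRPC_QUEUE_TIMEOUT) continue; /* nothing ready yet */
    handle_event(ev);                            /* GRPC_OP_COMPLETE */
  }
}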
grpc_alarm *grpc_alarm_create(grpc_completion_queue *cq, gpr_timespec deadline,
                              void *tag) {
  grpc_alarm *alarm = gpr_malloc(sizeof(grpc_alarm));
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  GRPC_CQ_INTERNAL_REF(cq, "alarm");
  alarm->cq = cq;
  alarm->tag = tag;
  /* note: this revision arms the timer before calling grpc_cq_begin_op;
     compare grpc_alarm_set above, which begins the cq operation first */
  grpc_timer_init(&exec_ctx, &alarm->alarm, deadline, alarm_cb, alarm,
                  gpr_now(GPR_CLOCK_MONOTONIC));
  grpc_cq_begin_op(cq, tag);
  grpc_exec_ctx_finish(&exec_ctx);
  return alarm;
}
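/* Usage sketch (not part of the source above): arming an alarm and waiting for
   its tag, using the grpc_alarm_create and three-argument
   grpc_completion_queue_pluck signatures shown in this listing. Hedged
   illustration; the tag value is arbitrary, and the matching alarm
   destroy/cancel call is omitted because it differs between API revisions. */
#include <grpc/grpc.h>
#include <grpc/support/time.h>

static void wait_for_alarm(grpc_completion_queue *cq) {
  void *tag = (void *)0x1; /* arbitrary application-chosen tag */
  gpr_timespec fires_at = gpr_time_add(
      gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(1, GPR_TIMESPAN));
  grpc_alarm *alarm = grpc_alarm_create(cq, fires_at, tag);
  /* blocks until the alarm expires (or is cancelled) and its tag is posted */
  grpc_event ev =
      grpc_completion_queue_pluck(cq, tag, gpr_inf_future(GPR_CLOCK_REALTIME));
  (void)ev;
  (void)alarm;
}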
grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
                                       gpr_timespec deadline) {
  grpc_event ret;
  grpc_cq_completion *c;
  grpc_cq_completion *prev;
  deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
  GRPC_CQ_INTERNAL_REF(cc, "pluck");
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  for (;;) {
    prev = &cc->completed_head;
    while ((c = (grpc_cq_completion *)(prev->next & ~(gpr_uintptr)1)) !=
           &cc->completed_head) {
      if (c->tag == tag) {
        prev->next =
            (prev->next & (gpr_uintptr)1) | (c->next & ~(gpr_uintptr)1);
        if (c == cc->completed_tail) {
          cc->completed_tail = prev;
        }
        gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
        ret.type = GRPC_OP_COMPLETE;
        ret.success = c->next & 1u;
        ret.tag = c->tag;
        c->done(c->done_arg, c);
        goto done;
      }
      prev = c;
    }
    if (cc->shutdown) {
      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
      memset(&ret, 0, sizeof(ret));
      ret.type = GRPC_QUEUE_SHUTDOWN;
      break;
    }
    if (!grpc_pollset_work(&cc->pollset, deadline)) {
      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
      memset(&ret, 0, sizeof(ret));
      ret.type = GRPC_QUEUE_TIMEOUT;
      break;
    }
  }
done:
  GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
  GRPC_CQ_INTERNAL_UNREF(cc, "pluck");
  return ret;
}
void grpc_server_register_completion_queue(grpc_server *server,
                                           grpc_completion_queue *cq,
                                           void *reserved) {
  size_t i, n;
  GRPC_API_TRACE(
      "grpc_server_register_completion_queue(server=%p, cq=%p, reserved=%p)",
      3, (server, cq, reserved));
  GPR_ASSERT(!reserved);
  for (i = 0; i < server->cq_count; i++) {
    if (server->cqs[i] == cq) return;
  }
  GRPC_CQ_INTERNAL_REF(cq, "server");
  grpc_cq_mark_server_cq(cq);
  n = server->cq_count++;
  server->cqs = gpr_realloc(
      server->cqs, server->cq_count * sizeof(grpc_completion_queue *));
  server->cqs[n] = cq;
}
static void register_completion_queue(grpc_server *server,
                                      grpc_completion_queue *cq,
                                      bool is_non_listening, void *reserved) {
  size_t i, n;
  GPR_ASSERT(!reserved);
  for (i = 0; i < server->cq_count; i++) {
    if (server->cqs[i] == cq) return;
  }
  grpc_cq_mark_server_cq(cq);
  if (is_non_listening) {
    grpc_cq_mark_non_listening_server_cq(cq);
  }
  GRPC_CQ_INTERNAL_REF(cq, "server");
  n = server->cq_count++;
  server->cqs = gpr_realloc(
      server->cqs, server->cq_count * sizeof(grpc_completion_queue *));
  server->cqs[n] = cq;
}
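/* Usage sketch (not part of the source above): how the public registration
   entry point is typically reached when bringing up a server. Hedged
   illustration assuming the three-argument
   grpc_server_register_completion_queue shown above; the listen address is an
   arbitrary placeholder. */
#include <grpc/grpc.h>

static grpc_server *start_server(grpc_completion_queue *cq) {
  grpc_server *server = grpc_server_create(NULL, NULL);
  /* every cq used for server notifications must be registered before start */
  grpc_server_register_completion_queue(server, cq, NULL);
  grpc_server_add_insecure_http2_port(server, "0.0.0.0:50051");
  grpc_server_start(server);
  return server;
}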
grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
                                      gpr_timespec deadline) {
  grpc_event ret;
  deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
  GRPC_CQ_INTERNAL_REF(cc, "next");
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  for (;;) {
    if (cc->completed_tail != &cc->completed_head) {
      grpc_cq_completion *c = (grpc_cq_completion *)cc->completed_head.next;
      cc->completed_head.next = c->next & ~(gpr_uintptr)1;
      if (c == cc->completed_tail) {
        cc->completed_tail = &cc->completed_head;
      }
      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
      ret.type = GRPC_OP_COMPLETE;
      ret.success = c->next & 1u;
      ret.tag = c->tag;
      c->done(c->done_arg, c);
      break;
    }
    if (cc->shutdown) {
      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
      memset(&ret, 0, sizeof(ret));
      ret.type = GRPC_QUEUE_SHUTDOWN;
      break;
    }
    if (!grpc_pollset_work(&cc->pollset, deadline)) {
      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
      memset(&ret, 0, sizeof(ret));
      ret.type = GRPC_QUEUE_TIMEOUT;
      break;
    }
  }
  GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
  GRPC_CQ_INTERNAL_UNREF(cc, "next");
  return ret;
}
grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
                            uint32_t propagation_mask,
                            grpc_completion_queue *cq,
                            const void *server_transport_data,
                            grpc_mdelem **add_initial_metadata,
                            size_t add_initial_metadata_count,
                            gpr_timespec send_deadline) {
  size_t i, j;
  grpc_channel_stack *channel_stack = grpc_channel_get_channel_stack(channel);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_call *call;
  GPR_TIMER_BEGIN("grpc_call_create", 0);
  call = gpr_malloc(sizeof(grpc_call) + channel_stack->call_stack_size);
  memset(call, 0, sizeof(grpc_call));
  gpr_mu_init(&call->mu);
  call->channel = channel;
  call->cq = cq;
  call->parent = parent_call;
  call->is_client = server_transport_data == NULL;
  if (call->is_client) {
    GPR_ASSERT(add_initial_metadata_count < MAX_SEND_EXTRA_METADATA_COUNT);
    for (i = 0; i < add_initial_metadata_count; i++) {
      call->send_extra_metadata[i].md = add_initial_metadata[i];
    }
    call->send_extra_metadata_count = (int)add_initial_metadata_count;
  } else {
    GPR_ASSERT(add_initial_metadata_count == 0);
    call->send_extra_metadata_count = 0;
  }
  for (i = 0; i < 2; i++) {
    for (j = 0; j < 2; j++) {
      call->metadata_batch[i][j].deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
    }
  }
  call->send_deadline = send_deadline;
  GRPC_CHANNEL_INTERNAL_REF(channel, "call");
  /* initial refcount dropped by grpc_call_destroy */
  grpc_call_stack_init(&exec_ctx, channel_stack, 1, destroy_call, call,
                       call->context, server_transport_data,
                       CALL_STACK_FROM_CALL(call));
  if (cq != NULL) {
    GRPC_CQ_INTERNAL_REF(cq, "bind");
    grpc_call_stack_set_pollset(&exec_ctx, CALL_STACK_FROM_CALL(call),
                                grpc_cq_pollset(cq));
  }
  if (parent_call != NULL) {
    GRPC_CALL_INTERNAL_REF(parent_call, "child");
    GPR_ASSERT(call->is_client);
    GPR_ASSERT(!parent_call->is_client);
    gpr_mu_lock(&parent_call->mu);
    if (propagation_mask & GRPC_PROPAGATE_DEADLINE) {
      send_deadline = gpr_time_min(
          gpr_convert_clock_type(send_deadline,
                                 parent_call->send_deadline.clock_type),
          parent_call->send_deadline);
    }
    /* for now GRPC_PROPAGATE_TRACING_CONTEXT *MUST* be passed with
     * GRPC_PROPAGATE_STATS_CONTEXT */
    /* TODO(ctiller): This should change to use the appropriate census start_op
     * call. */
    if (propagation_mask & GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT) {
      GPR_ASSERT(propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT);
      grpc_call_context_set(call, GRPC_CONTEXT_TRACING,
                            parent_call->context[GRPC_CONTEXT_TRACING].value,
                            NULL);
    } else {
      GPR_ASSERT(propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT);
    }
    if (propagation_mask & GRPC_PROPAGATE_CANCELLATION) {
      call->cancellation_is_inherited = 1;
    }
    if (parent_call->first_child == NULL) {
      parent_call->first_child = call;
      call->sibling_next = call->sibling_prev = call;
    } else {
      call->sibling_next = parent_call->first_child;
      call->sibling_prev = parent_call->first_child->sibling_prev;
      call->sibling_next->sibling_prev = call->sibling_prev->sibling_next =
          call;
    }
    gpr_mu_unlock(&parent_call->mu);
  }
  if (gpr_time_cmp(send_deadline, gpr_inf_future(send_deadline.clock_type)) !=
      0) {
    set_deadline_alarm(&exec_ctx, call, send_deadline);
  }
  grpc_exec_ctx_finish(&exec_ctx);
  GPR_TIMER_END("grpc_call_create", 0);
  return call;
}
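/* Usage sketch (not part of the source above): grpc_call_create is internal;
   client code reaches it through grpc_channel_create_call. Hedged illustration
   assuming the char*-based method/host signature of this era; the target,
   method, and host strings are placeholders, and channel/call cleanup is
   omitted. */
#include <grpc/grpc.h>
#include <grpc/support/time.h>

static grpc_call *make_call(grpc_completion_queue *cq) {
  grpc_channel *channel =
      grpc_insecure_channel_create("localhost:50051", NULL, NULL);
  gpr_timespec deadline = gpr_time_add(
      gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(5, GPR_TIMESPAN));
  /* no parent call, default propagation, bound to the caller's cq */
  grpc_call *call = grpc_channel_create_call(
      channel, NULL, GRPC_PROPAGATE_DEFAULTS, cq, "/pkg.Service/Method",
      "localhost", deadline, NULL);
  return call;
}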
grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
                                       gpr_timespec deadline, void *reserved) {
  grpc_event ret;
  grpc_cq_completion *c;
  grpc_cq_completion *prev;
  grpc_pollset_worker worker;
  gpr_timespec now;
  int first_loop = 1;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  GPR_ASSERT(!reserved);

  deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);

  GRPC_CQ_INTERNAL_REF(cc, "pluck");
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  for (;;) {
    prev = &cc->completed_head;
    while ((c = (grpc_cq_completion *)(prev->next & ~(gpr_uintptr)1)) !=
           &cc->completed_head) {
      if (c->tag == tag) {
        prev->next =
            (prev->next & (gpr_uintptr)1) | (c->next & ~(gpr_uintptr)1);
        if (c == cc->completed_tail) {
          cc->completed_tail = prev;
        }
        gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
        ret.type = GRPC_OP_COMPLETE;
        ret.success = c->next & 1u;
        ret.tag = c->tag;
        c->done(&exec_ctx, c->done_arg, c);
        goto done;
      }
      prev = c;
    }
    if (cc->shutdown) {
      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
      memset(&ret, 0, sizeof(ret));
      ret.type = GRPC_QUEUE_SHUTDOWN;
      break;
    }
    if (!add_plucker(cc, tag, &worker)) {
      gpr_log(GPR_DEBUG,
              "Too many outstanding grpc_completion_queue_pluck calls: maximum "
              "is %d",
              GRPC_MAX_COMPLETION_QUEUE_PLUCKERS);
      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
      memset(&ret, 0, sizeof(ret));
      /* TODO(ctiller): should we use a different result here */
      ret.type = GRPC_QUEUE_TIMEOUT;
      break;
    }
    now = gpr_now(GPR_CLOCK_MONOTONIC);
    if (!first_loop && gpr_time_cmp(now, deadline) >= 0) {
      del_plucker(cc, tag, &worker);
      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
      memset(&ret, 0, sizeof(ret));
      ret.type = GRPC_QUEUE_TIMEOUT;
      break;
    }
    first_loop = 0;
    grpc_pollset_work(&exec_ctx, &cc->pollset, &worker, now, deadline);
    del_plucker(cc, tag, &worker);
  }
done:
  GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
  GRPC_CQ_INTERNAL_UNREF(cc, "pluck");
  grpc_exec_ctx_finish(&exec_ctx);
  return ret;
}
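/* Usage sketch (not part of the source above): the standard teardown pattern
   for a completion queue driven by next/pluck. grpc_completion_queue_shutdown
   makes the queue return GRPC_QUEUE_SHUTDOWN once all pending operations have
   completed; only then may it be destroyed. Hedged illustration. */
#include <grpc/grpc.h>
#include <grpc/support/time.h>

static void drain_and_destroy(grpc_completion_queue *cq) {
  grpc_event ev;
  grpc_completion_queue_shutdown(cq);
  do {
    /* keep draining until the queue reports shutdown */
    ev = grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
                                    NULL);
  } while (ev.type != GRPC_QUEUE_SHUTDOWN);
  grpc_completion_queue_destroy(cq);
}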