/* Destroy a call object: detach it from its parent's child list (if any),
   cancel any pending deadline alarm, cancel the call itself if it has not
   yet received its final op, then drop the "destroy" ref. */
void grpc_call_destroy(grpc_call *c) {
  int cancel;
  grpc_call *parent = c->parent;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  GPR_TIMER_BEGIN("grpc_call_destroy", 0);
  GRPC_API_TRACE("grpc_call_destroy(c=%p)", 1, (c));
  if (parent) {
    gpr_mu_lock(&parent->mu);
    if (c == parent->first_child) {
      parent->first_child = c->sibling_next;
      if (c == parent->first_child) {
        /* c was the only child */
        parent->first_child = NULL;
      }
    }
    /* BUGFIX: unlink c from the circular sibling list unconditionally.
       Previously these two assignments ran only when c was
       parent->first_child, so destroying any other child left its siblings
       pointing at freed memory. */
    c->sibling_prev->sibling_next = c->sibling_next;
    c->sibling_next->sibling_prev = c->sibling_prev;
    gpr_mu_unlock(&parent->mu);
    GRPC_CALL_INTERNAL_UNREF(&exec_ctx, parent, "child");
  }
  gpr_mu_lock(&c->mu);
  GPR_ASSERT(!c->destroy_called);
  c->destroy_called = 1;
  if (c->have_alarm) {
    grpc_timer_cancel(&exec_ctx, &c->alarm);
  }
  /* if the final op never arrived, the call is still in flight: cancel it */
  cancel = !c->received_final_op;
  gpr_mu_unlock(&c->mu);
  if (cancel) grpc_call_cancel(c, NULL);
  GRPC_CALL_INTERNAL_UNREF(&exec_ctx, c, "destroy");
  grpc_exec_ctx_finish(&exec_ctx);
  GPR_TIMER_END("grpc_call_destroy", 0);
}
/* Completion-queue callback for a finished batch: release the batch slot in
   call->used_batches and drop the "completion" ref taken when the batch was
   started.  `storage` is the cq completion record and is unused here. */
static void finish_batch_completion(grpc_exec_ctx *exec_ctx, void *user_data,
                                    grpc_cq_completion *storage) {
  batch_control *bctl = user_data;
  grpc_call *call = bctl->call;
  /* each batch_control occupies one bit, indexed by its slot in
     active_batches */
  gpr_uint8 slot_bit = (gpr_uint8)(1 << (bctl - call->active_batches));
  gpr_mu_lock(&call->mu);
  call->used_batches = (gpr_uint8)(call->used_batches & ~slot_bit);
  gpr_mu_unlock(&call->mu);
  GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
}
/* Deadline timer callback: clears the alarm flag and, if the timer actually
   fired (success != 0), cancels the call with DEADLINE_EXCEEDED.  Releases
   the "alarm" ref taken when the timer was armed. */
static void call_alarm(grpc_exec_ctx *exec_ctx, void *arg, int success) {
  grpc_call *c = arg;
  gpr_mu_lock(&c->mu);
  c->have_alarm = 0;
  if (success != 0) {
    /* timer expired (as opposed to being cancelled) */
    cancel_with_status(exec_ctx, c, GRPC_STATUS_DEADLINE_EXCEEDED,
                       "Deadline Exceeded");
  }
  gpr_mu_unlock(&c->mu);
  GRPC_CALL_INTERNAL_UNREF(exec_ctx, c, "alarm");
}
static void call_on_done_recv(void *pc, int success) { grpc_call *call = pc; size_t i; GRPC_TIMER_BEGIN(GRPC_PTAG_CALL_ON_DONE_RECV, 0); lock(call); call->receiving = 0; if (success) { for (i = 0; success && i < call->recv_ops.nops; i++) { grpc_stream_op *op = &call->recv_ops.ops[i]; switch (op->type) { case GRPC_NO_OP: break; case GRPC_OP_METADATA: recv_metadata(call, &op->data.metadata); break; case GRPC_OP_BEGIN_MESSAGE: success = begin_message(call, op->data.begin_message); break; case GRPC_OP_SLICE: success = add_slice_to_message(call, op->data.slice); break; } } if (!success) { grpc_stream_ops_unref_owned_objects(&call->recv_ops.ops[i], call->recv_ops.nops - i); } if (call->recv_state == GRPC_STREAM_RECV_CLOSED) { GPR_ASSERT(call->read_state <= READ_STATE_READ_CLOSED); call->read_state = READ_STATE_READ_CLOSED; } if (call->recv_state == GRPC_STREAM_CLOSED) { GPR_ASSERT(call->read_state <= READ_STATE_STREAM_CLOSED); call->read_state = READ_STATE_STREAM_CLOSED; if (call->have_alarm) { grpc_alarm_cancel(&call->alarm); call->have_alarm = 0; } } finish_read_ops(call); } else { finish_ioreq_op(call, GRPC_IOREQ_RECV_MESSAGE, 0); finish_ioreq_op(call, GRPC_IOREQ_RECV_STATUS, 0); finish_ioreq_op(call, GRPC_IOREQ_RECV_CLOSE, 0); finish_ioreq_op(call, GRPC_IOREQ_RECV_TRAILING_METADATA, 0); finish_ioreq_op(call, GRPC_IOREQ_RECV_INITIAL_METADATA, 0); finish_ioreq_op(call, GRPC_IOREQ_RECV_STATUS_DETAILS, 0); } call->recv_ops.nops = 0; unlock(call); GRPC_CALL_INTERNAL_UNREF(call, "receiving", 0); GRPC_TIMER_BEGIN(GRPC_PTAG_CALL_ON_DONE_RECV, 0); }
/* Release call->mu and kick off any work that became ready while the lock
   was held.  Must be entered with call->mu held; it is unlocked before any
   callbacks run.  Three kinds of deferred work are handled:
     1. starting a transport receive (if not already receiving and more data
        is needed),
     2. starting a transport send (if not already sending and there are ops
        to send),
     3. invoking completion callbacks for finished ioreqs.
   Each in-flight activity pins the call with its own internal ref
   ("receiving" / "sending" / "completing") so the call outlives the async
   operation. */
static void unlock(grpc_call *call) {
  grpc_transport_op op;
  completed_request completed_requests[GRPC_IOREQ_OP_COUNT];
  int completing_requests = 0;
  int start_op = 0;
  int i;
  memset(&op, 0, sizeof(op));
  /* arrange a receive if one is not already in flight and callers want data */
  if (!call->receiving && need_more_data(call)) {
    op.recv_ops = &call->recv_ops;
    op.recv_state = &call->recv_state;
    op.on_done_recv = call_on_done_recv;
    op.recv_user_data = call;
    call->receiving = 1;
    GRPC_CALL_INTERNAL_REF(call, "receiving");
    start_op = 1;
  }
  /* arrange a send if one is not already in flight and there is data to send */
  if (!call->sending) {
    if (fill_send_ops(call, &op)) {
      call->sending = 1;
      GRPC_CALL_INTERNAL_REF(call, "sending");
      start_op = 1;
    }
  }
  /* snapshot completed requests under the lock; callbacks run after unlock
     to avoid re-entrancy while holding call->mu */
  if (!call->completing && call->num_completed_requests != 0) {
    completing_requests = call->num_completed_requests;
    memcpy(completed_requests, call->completed_requests,
           sizeof(completed_requests));
    call->num_completed_requests = 0;
    call->completing = 1;
    GRPC_CALL_INTERNAL_REF(call, "completing");
  }
  gpr_mu_unlock(&call->mu);
  if (start_op) {
    execute_op(call, &op);
  }
  if (completing_requests > 0) {
    for (i = 0; i < completing_requests; i++) {
      completed_requests[i].on_complete(call, completed_requests[i].success,
                                        completed_requests[i].user_data);
    }
    /* re-enter lock/unlock to pick up any work the callbacks queued */
    lock(call);
    call->completing = 0;
    unlock(call);
    GRPC_CALL_INTERNAL_UNREF(call, "completing", 0);
  }
}
/* Legacy deadline-alarm callback.  When the alarm actually fires
   (success != 0): clients fail the call with DEADLINE_EXCEEDED, servers
   simply cancel it.  Either way the "alarm" ref is released. */
static void call_alarm(void *arg, int success) {
  grpc_call *c = arg;
  if (success) {
    if (c->is_client) {
      cancel_with_status(c, GRPC_STATUS_DEADLINE_EXCEEDED, "Deadline Exceeded",
                         0);
    } else {
      grpc_call_cancel(c);
    }
  }
  GRPC_CALL_INTERNAL_UNREF(c, "alarm", 1);
}
/* Legacy destroy path: cancel any pending alarm, cancel the call if the
   stream has not fully closed, and drop the "destroy" ref. */
void grpc_call_destroy(grpc_call *c) {
  int needs_cancel;
  lock(c);
  if (c->have_alarm) {
    grpc_alarm_cancel(&c->alarm);
    c->have_alarm = 0;
  }
  /* still in flight unless the stream reached full close */
  needs_cancel = (c->read_state != READ_STATE_STREAM_CLOSED);
  unlock(c);
  if (needs_cancel) {
    grpc_call_cancel(c);
  }
  GRPC_CALL_INTERNAL_UNREF(c, "destroy", 1);
}
/* Publish a newly-matched incoming call to the application's completion
   queue.  Takes a server ref that done_request_event is responsible for
   releasing, then drops the "server" ref on the call. */
static void publish_registered_or_batch(grpc_exec_ctx *exec_ctx, void *prc,
                                        bool success) {
  requested_call *rc = prc;
  grpc_call *call = *rc->call;
  grpc_call_stack *stack = grpc_call_get_call_stack(call);
  grpc_call_element *elem = grpc_call_stack_element(stack, 0);
  call_data *calld = elem->call_data;
  channel_data *chand = elem->channel_data;
  /* keep the server alive until the cq completion is consumed */
  server_ref(chand->server);
  grpc_cq_end_op(exec_ctx, calld->cq_new, rc->tag, success, done_request_event,
                 rc, &rc->completion);
  GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "server");
}
/* Deliver the completion for a finished batch.  Closure-style notify tags
   are scheduled directly on the exec_ctx (and the batch slot freed here);
   plain tags go through the completion queue, which frees the slot later in
   finish_batch_completion. */
static void post_batch_completion(grpc_exec_ctx *exec_ctx,
                                  batch_control *bctl) {
  grpc_call *call = bctl->call;
  if (!bctl->is_notify_tag_closure) {
    /* cq path: slot is released by finish_batch_completion */
    grpc_cq_end_op(exec_ctx, call->cq, bctl->notify_tag, bctl->success,
                   finish_batch_completion, bctl, &bctl->cq_completion);
    return;
  }
  /* closure path: run the closure and release the slot immediately */
  grpc_exec_ctx_enqueue(exec_ctx, bctl->notify_tag, bctl->success);
  gpr_mu_lock(&call->mu);
  call->used_batches = (gpr_uint8)(
      call->used_batches & ~(gpr_uint8)(1 << (bctl - call->active_batches)));
  gpr_mu_unlock(&call->mu);
  GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
}
/* Cancel every call the server still tracks.  A snapshot of the circular
   ALL_CALLS list is taken (with a ref per call) under mu_call, then the
   cancellations run outside the lock to avoid lock-order issues. */
void grpc_server_cancel_all_calls(grpc_server *server) {
  call_data *cd;
  grpc_call **snapshot;
  size_t count = 0;
  size_t capacity = 8;
  size_t i;
  int first_pass = 1;
  gpr_mu_lock(&server->mu_call);
  GPR_ASSERT(server->shutdown);
  if (server->lists[ALL_CALLS] == NULL) {
    gpr_mu_unlock(&server->mu_call);
    return;
  }
  snapshot = gpr_malloc(sizeof(grpc_call *) * capacity);
  /* walk the circular list exactly once; first_pass lets the loop start
     even though the first node equals the sentinel */
  for (cd = server->lists[ALL_CALLS];
       first_pass || cd != server->lists[ALL_CALLS];
       cd = cd->links[ALL_CALLS].next) {
    if (count == capacity) {
      capacity *= 2;
      snapshot = gpr_realloc(snapshot, sizeof(grpc_call *) * capacity);
    }
    /* pin each call so it survives until we cancel it below */
    GRPC_CALL_INTERNAL_REF(cd->call, "cancel_all");
    snapshot[count++] = cd->call;
    first_pass = 0;
  }
  gpr_mu_unlock(&server->mu_call);
  for (i = 0; i < count; i++) {
    grpc_call_cancel_with_status(snapshot[i], GRPC_STATUS_UNAVAILABLE,
                                 "Unavailable");
    GRPC_CALL_INTERNAL_UNREF(snapshot[i], "cancel_all", 1);
  }
  gpr_free(snapshot);
}
static void call_on_done_send(void *pc, int success) { grpc_call *call = pc; lock(call); if (call->last_send_contains & (1 << GRPC_IOREQ_SEND_INITIAL_METADATA)) { finish_ioreq_op(call, GRPC_IOREQ_SEND_INITIAL_METADATA, success); } if (call->last_send_contains & (1 << GRPC_IOREQ_SEND_MESSAGE)) { finish_ioreq_op(call, GRPC_IOREQ_SEND_MESSAGE, success); } if (call->last_send_contains & (1 << GRPC_IOREQ_SEND_CLOSE)) { finish_ioreq_op(call, GRPC_IOREQ_SEND_TRAILING_METADATA, success); finish_ioreq_op(call, GRPC_IOREQ_SEND_STATUS, success); finish_ioreq_op(call, GRPC_IOREQ_SEND_CLOSE, 1); } call->last_send_contains = 0; call->sending = 0; unlock(call); GRPC_CALL_INTERNAL_UNREF(call, "sending", 0); }
/* Callback run when all transport-level work for a batch has finished.
   Under call->mu: tears down the metadata batches the batch sent, filters
   any received metadata, arms the deadline alarm on the server side,
   propagates cancellation to inheriting children once the final op arrives,
   and records final status.  Finally, if this was the last outstanding step
   of the batch, posts the batch completion. */
static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp, int success) {
  batch_control *bctl = bctlp;
  grpc_call *call = bctl->call;
  grpc_call *child_call;
  grpc_call *next_child_call;
  gpr_mu_lock(&call->mu);
  if (bctl->send_initial_metadata) {
    grpc_metadata_batch_destroy(
        &call->metadata_batch[0 /* is_receiving */][0 /* is_trailing */]);
  }
  if (bctl->send_message) {
    call->sending_message = 0;
  }
  if (bctl->send_final_op) {
    grpc_metadata_batch_destroy(
        &call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */]);
  }
  if (bctl->recv_initial_metadata) {
    grpc_metadata_batch *md =
        &call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */];
    grpc_metadata_batch_filter(md, recv_initial_filter, call);
    /* servers learn their deadline from the client's initial metadata; arm
       the alarm only if a finite deadline was supplied */
    if (gpr_time_cmp(md->deadline, gpr_inf_future(md->deadline.clock_type)) !=
            0 &&
        !call->is_client) {
      GPR_TIMER_BEGIN("set_deadline_alarm", 0);
      set_deadline_alarm(exec_ctx, call, md->deadline);
      GPR_TIMER_END("set_deadline_alarm", 0);
    }
  }
  if (bctl->recv_final_op) {
    grpc_metadata_batch *md =
        &call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */];
    grpc_metadata_batch_filter(md, recv_trailing_filter, call);
    /* final op received: the deadline alarm can no longer matter */
    if (call->have_alarm) {
      grpc_timer_cancel(exec_ctx, &call->alarm);
    }
    /* propagate cancellation to any interested children */
    child_call = call->first_child;
    if (child_call != NULL) {
      /* children form a circular doubly-linked list; next pointer is saved
         before cancel in case the child unlinks itself */
      do {
        next_child_call = child_call->sibling_next;
        if (child_call->cancellation_is_inherited) {
          GRPC_CALL_INTERNAL_REF(child_call, "propagate_cancel");
          grpc_call_cancel(child_call, NULL);
          GRPC_CALL_INTERNAL_UNREF(exec_ctx, child_call, "propagate_cancel");
        }
        child_call = next_child_call;
      } while (child_call != call->first_child);
    }
    if (call->is_client) {
      get_final_status(call, set_status_value_directly,
                       call->final_op.client.status);
      get_final_details(call, call->final_op.client.status_details,
                        call->final_op.client.status_details_capacity);
    } else {
      get_final_status(call, set_cancelled_value,
                       call->final_op.server.cancelled);
    }
    /* receiving the final op counts as batch success regardless of how the
       transport reported it */
    success = 1;
  }
  bctl->success = success != 0;
  gpr_mu_unlock(&call->mu);
  /* last outstanding step of the batch posts the completion */
  if (gpr_unref(&bctl->steps_to_complete)) {
    post_batch_completion(exec_ctx, bctl);
  }
}
/* Closure run when a cancel op completes: free the closure's storage and
   release the "cancel" ref taken when the cancel was issued. */
static void done_cancel(grpc_exec_ctx *exec_ctx, void *ccp, int success) {
  cancel_closure *cc = ccp;
  grpc_call *call = cc->call;
  gpr_free(cc);
  GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "cancel");
}