/* Destructor for channel_data: cancels any in-flight transport setup,
   destroys the active child channel (if one exists), and releases the
   channel args, mutex and waiting-children storage.  By the time this
   runs, every waiting call must already have been resolved. */
static void destroy_channel_elem(grpc_channel_element *elem) {
  channel_data *chand = elem->channel_data;
  grpc_child_channel *child;

  /* stop any connection attempt that is still in progress */
  grpc_transport_setup_cancel(chand->transport_setup);

  /* tear down the active child, clearing the field as we go */
  child = chand->active_child;
  chand->active_child = NULL;
  if (child != NULL) {
    grpc_child_channel_destroy(child, 1);
  }

  grpc_channel_args_destroy(chand->args);
  gpr_mu_destroy(&chand->mu);

  /* no call may still be parked waiting for a child channel */
  GPR_ASSERT(chand->waiting_child_count == 0);
  gpr_free(chand->waiting_children);
}
/* Called when transport setup finishes and a new transport is available.
   Builds a child channel stack around the transport, installs it as the
   active child (replacing and later destroying any previous one), and
   activates every call that was parked waiting for a transport.

   Ownership notes: this function takes over chand->waiting_children and
   frees it, along with the temporary call_ops and child_filters arrays,
   before returning. */
grpc_transport_setup_result grpc_client_channel_transport_setup_complete(
    grpc_channel_stack *channel_stack, grpc_transport *transport,
    grpc_channel_filter const **channel_filters, size_t num_channel_filters,
    grpc_mdctx *mdctx) {
  /* we just got a new transport: lets create a child channel stack for it */
  grpc_channel_element *elem = grpc_channel_stack_last_element(channel_stack);
  channel_data *chand = elem->channel_data;
  /* +2: one slot for the link-back filter, one for the connected channel */
  size_t num_child_filters = 2 + num_channel_filters;
  grpc_channel_filter const **child_filters;
  grpc_transport_setup_result result;
  grpc_child_channel *old_active = NULL;
  call_data **waiting_children;
  size_t waiting_child_count;
  size_t i;
  grpc_call_op *call_ops;

  /* build the child filter stack */
  child_filters = gpr_malloc(sizeof(grpc_channel_filter *) * num_child_filters);
  /* we always need a link back filter to get back to the connected channel */
  child_filters[0] = &grpc_child_channel_top_filter;
  for (i = 0; i < num_channel_filters; i++) {
    child_filters[i + 1] = channel_filters[i];
  }
  /* and we always need a connected channel to talk to the transport */
  child_filters[num_child_filters - 1] = &grpc_connected_channel_filter;

  GPR_ASSERT(elem->filter == &grpc_client_channel_filter);

  /* BEGIN LOCKING CHANNEL */
  gpr_mu_lock(&chand->mu);
  /* allow a future connectivity loss to initiate setup again */
  chand->transport_setup_initiated = 0;

  /* remember the old child (if any) so it can be destroyed outside the
     lock, after the new one is installed */
  if (chand->active_child) {
    old_active = chand->active_child;
  }
  chand->active_child = grpc_child_channel_create(
      elem, child_filters, num_child_filters, chand->args, mdctx);
  result = grpc_connected_channel_bind_transport(chand->active_child, transport);

  /* capture the waiting children - we'll activate them outside the lock
     to avoid re-entrancy problems */
  waiting_children = chand->waiting_children;
  waiting_child_count = chand->waiting_child_count;
  /* reset the waiting list so new calls start a fresh one.
     NOTE(review): the old comment here referred to "bumping up
     inflight_requests", which no visible code does - it appears stale;
     confirm against the rest of the file. */
  chand->waiting_children = NULL;
  chand->waiting_child_count = 0;
  chand->waiting_child_capacity = 0;

  /* snapshot each waiting call's start parameters into a call op */
  call_ops = gpr_malloc(sizeof(grpc_call_op) * waiting_child_count);

  for (i = 0; i < waiting_child_count; i++) {
    call_ops[i].type = GRPC_SEND_START;
    call_ops[i].dir = GRPC_CALL_DOWN;
    call_ops[i].flags = waiting_children[i]->s.waiting.start_flags;
    call_ops[i].done_cb = waiting_children[i]->s.waiting.on_complete;
    call_ops[i].user_data = waiting_children[i]->s.waiting.on_complete_user_data;
    call_ops[i].data.start.pollset = waiting_children[i]->s.waiting.pollset;
    /* calls that fail to prepare are dropped from the activation list and
       their completion callback is fired with an error - note that this
       callback runs while chand->mu is still held */
    if (!prepare_activate(waiting_children[i]->elem, chand->active_child)) {
      waiting_children[i] = NULL;
      call_ops[i].done_cb(call_ops[i].user_data, GRPC_OP_ERROR);
    }
  }

  /* END LOCKING CHANNEL */
  gpr_mu_unlock(&chand->mu);

  /* activate any pending operations - this is safe to do as we guarantee one
     and only one write operation per request at the surface api - if we lose
     that guarantee we need to do some curly locking here */
  for (i = 0; i < waiting_child_count; i++) {
    if (waiting_children[i]) {
      complete_activate(waiting_children[i]->elem, &call_ops[i]);
    }
  }
  gpr_free(waiting_children);
  gpr_free(call_ops);
  gpr_free(child_filters);

  /* destroy the displaced child last, outside the lock */
  if (old_active) {
    grpc_child_channel_destroy(old_active, 1);
  }

  return result;
}
static void channel_op(grpc_channel_element *elem, grpc_channel_element *from_elem, grpc_channel_op *op) { channel_data *chand = elem->channel_data; grpc_child_channel *child_channel; grpc_channel_op rop; GPR_ASSERT(elem->filter == &grpc_client_channel_filter); switch (op->type) { case GRPC_CHANNEL_GOAWAY: /* sending goaway: clear out the active child on the way through */ gpr_mu_lock(&chand->mu); child_channel = chand->active_child; chand->active_child = NULL; gpr_mu_unlock(&chand->mu); if (child_channel) { grpc_child_channel_handle_op(child_channel, op); grpc_child_channel_destroy(child_channel, 1); } else { gpr_slice_unref(op->data.goaway.message); } break; case GRPC_CHANNEL_DISCONNECT: /* sending disconnect: clear out the active child on the way through */ gpr_mu_lock(&chand->mu); child_channel = chand->active_child; chand->active_child = NULL; gpr_mu_unlock(&chand->mu); if (child_channel) { grpc_child_channel_destroy(child_channel, 1); } /* fake a transport closed to satisfy the refcounting in client */ rop.type = GRPC_TRANSPORT_CLOSED; rop.dir = GRPC_CALL_UP; grpc_channel_next_op(elem, &rop); break; case GRPC_TRANSPORT_GOAWAY: /* receiving goaway: if it's from our active child, drop the active child; in all cases consume the event here */ gpr_mu_lock(&chand->mu); child_channel = grpc_channel_stack_from_top_element(from_elem); if (child_channel == chand->active_child) { chand->active_child = NULL; } else { child_channel = NULL; } gpr_mu_unlock(&chand->mu); if (child_channel) { grpc_child_channel_destroy(child_channel, 0); } gpr_slice_unref(op->data.goaway.message); break; case GRPC_TRANSPORT_CLOSED: /* receiving disconnect: if it's from our active child, drop the active child; in all cases consume the event here */ gpr_mu_lock(&chand->mu); child_channel = grpc_channel_stack_from_top_element(from_elem); if (child_channel == chand->active_child) { chand->active_child = NULL; } else { child_channel = NULL; } gpr_mu_unlock(&chand->mu); if (child_channel) { 
grpc_child_channel_destroy(child_channel, 0); } break; default: switch (op->dir) { case GRPC_CALL_UP: grpc_channel_next_op(elem, op); break; case GRPC_CALL_DOWN: gpr_log(GPR_ERROR, "unhandled channel op: %d", op->type); abort(); break; } break; } }