/* Per-call element initializer for this filter.
   Only the optional initial_op is acted upon: if the caller handed us an
   initial transport op, it is immediately completed with failure.  The
   elem and transport_server_data parameters are accepted for the filter
   vtable signature but are not used here. */
static void init_call_elem(grpc_call_element *elem,
                           const void *transport_server_data,
                           grpc_transport_op *initial_op) {
  if (initial_op == NULL) {
    return;
  }
  grpc_transport_op_finish_with_failure(initial_op);
}
/* Called when transport setup finishes: builds a child channel stack around
   the new transport, swaps it in as the active child under the channel lock,
   and (outside the lock) activates every call that was queued waiting for a
   transport.  Returns the result of binding the transport to the connected
   channel.  Any previously active child channel is destroyed at the end. */
grpc_transport_setup_result grpc_client_channel_transport_setup_complete(
    grpc_channel_stack *channel_stack, grpc_transport *transport,
    grpc_channel_filter const **channel_filters, size_t num_channel_filters,
    grpc_mdctx *mdctx) {
  /* we just got a new transport: lets create a child channel stack for it */
  grpc_channel_element *elem = grpc_channel_stack_last_element(channel_stack);
  channel_data *chand = elem->channel_data;
  /* +2: one slot for the link-back filter, one for the connected-channel
     filter that wraps the transport */
  size_t num_child_filters = 2 + num_channel_filters;
  grpc_channel_filter const **child_filters;
  grpc_transport_setup_result result;
  /* previously active child, if any; destroyed after the lock is released */
  grpc_child_channel *old_active = NULL;
  /* snapshot of the calls queued while no transport was available */
  call_data **waiting_children;
  size_t waiting_child_count;
  size_t i;
  /* per-waiting-call copy of its pending op, taken under the lock */
  grpc_transport_op *call_ops;

  /* build the child filter stack:
     [link-back][caller-supplied filters...][connected channel] */
  child_filters = gpr_malloc(sizeof(grpc_channel_filter *) * num_child_filters);
  /* we always need a link back filter to get back to the connected channel */
  child_filters[0] = &grpc_child_channel_top_filter;
  for (i = 0; i < num_channel_filters; i++) {
    child_filters[i + 1] = channel_filters[i];
  }
  /* and we always need a connected channel to talk to the transport */
  child_filters[num_child_filters - 1] = &grpc_connected_channel_filter;

  /* this function must only run against the client channel filter's element */
  GPR_ASSERT(elem->filter == &grpc_client_channel_filter);

  /* BEGIN LOCKING CHANNEL */
  gpr_mu_lock(&chand->mu);
  /* allow a future disconnect/reconnect to initiate setup again */
  chand->transport_setup_initiated = 0;

  /* stash the old child (if any) so it can be destroyed outside the lock */
  if (chand->active_child) {
    old_active = chand->active_child;
  }
  /* install the new child channel built from the filter stack above */
  chand->active_child = grpc_child_channel_create(
      elem, child_filters, num_child_filters, chand->args, mdctx);
  /* attach the new transport to the connected-channel element; this is the
     value ultimately returned to the caller */
  result =
      grpc_connected_channel_bind_transport(chand->active_child, transport);

  /* capture the waiting children - we'll activate them outside the lock
     to avoid re-entrancy problems */
  waiting_children = chand->waiting_children;
  waiting_child_count = chand->waiting_child_count;
  /* detach the waiting list from the channel so later calls start a fresh
     queue; the captured snapshot is now owned by this function.
     NOTE(review): a previous comment here mentioned bumping
     "inflight_requests", but no such field is touched in this function -
     the comment appeared stale. */

  chand->waiting_children = NULL;
  chand->waiting_child_count = 0;
  chand->waiting_child_capacity = 0;

  /* copy each waiting op out while still under the lock */
  call_ops = gpr_malloc(sizeof(*call_ops) * waiting_child_count);

  for (i = 0; i < waiting_child_count; i++) {
    call_ops[i] = waiting_children[i]->s.waiting_op;
    /* if a child cannot be activated on the new channel, fail its op now and
       NULL the slot so the activation loop below skips it */
    if (!prepare_activate(waiting_children[i]->elem, chand->active_child)) {
      waiting_children[i] = NULL;
      grpc_transport_op_finish_with_failure(&call_ops[i]);
    }
  }

  /* END LOCKING CHANNEL */
  gpr_mu_unlock(&chand->mu);

  /* activate any pending operations - this is safe to do as we guarantee one
     and only one write operation per request at the surface api - if we lose
     that guarantee we need to do some curly locking here */
  for (i = 0; i < waiting_child_count; i++) {
    if (waiting_children[i]) {
      complete_activate(waiting_children[i]->elem, &call_ops[i]);
    }
  }
  /* release the captured queue, the op copies, and the filter array (the
     child channel is presumed to have copied what it needs from
     child_filters in grpc_child_channel_create - TODO confirm) */
  gpr_free(waiting_children);
  gpr_free(call_ops);
  gpr_free(child_filters);

  /* tear down the previously active child, now that no locks are held */
  if (old_active) {
    grpc_child_channel_destroy(old_active, 1);
  }

  return result;
}