/* Cancel an RPC in whatever state it currently is.
   Takes chand->mu to read/update calld->state; the lock is always released
   before calling into other filters or invoking callbacks. */
static void cancel_rpc(grpc_call_element *elem, grpc_call_op *op) {
  call_data *calld = elem->call_data;
  channel_data *chand = elem->channel_data;
  grpc_call_element *child_elem;
  gpr_mu_lock(&chand->mu);
  switch (calld->state) {
    case CALL_ACTIVE:
      /* already bound to a child stack: forward the cancel op down it */
      child_elem = grpc_child_call_get_top_element(calld->s.active.child_call);
      gpr_mu_unlock(&chand->mu);
      child_elem->filter->call_op(child_elem, elem, op);
      return; /* early out */
    case CALL_WAITING:
      /* still waiting for a connected channel: leave the waiting set, mark
         cancelled, then (outside the lock) fail the pending completion */
      remove_waiting_child(chand, calld);
      calld->state = CALL_CANCELLED;
      gpr_mu_unlock(&chand->mu);
      send_up_cancelled_ops(elem);
      calld->s.waiting.on_complete(calld->s.waiting.on_complete_user_data,
                                   GRPC_OP_ERROR);
      return; /* early out */
    case CALL_CREATED:
      /* never started: just mark cancelled and notify upward */
      calld->state = CALL_CANCELLED;
      gpr_mu_unlock(&chand->mu);
      send_up_cancelled_ops(elem);
      return; /* early out */
    case CALL_CANCELLED:
      /* already cancelled: nothing to do */
      gpr_mu_unlock(&chand->mu);
      return; /* early out */
  }
  /* defensive: all enum values return above */
  gpr_log(GPR_ERROR, "should never reach here");
  abort();
}
/* Finish activating a call that has just been bound to a child channel
   stack: flush the buffered metadata, propagate a finite deadline (if one
   was recorded), then continue the start op down the child stack.
   Must be called with calld->state == CALL_ACTIVE and outside chand->mu. */
static void complete_activate(grpc_call_element *elem, grpc_call_op *op) {
  call_data *calld = elem->call_data;
  grpc_call_element *child_elem =
      grpc_child_call_get_top_element(calld->s.active.child_call);

  GPR_ASSERT(calld->state == CALL_ACTIVE);

  /* sending buffered metadata down the stack before the start call */
  grpc_metadata_buffer_flush(&calld->pending_metadata, child_elem);

  if (gpr_time_cmp(calld->deadline, gpr_inf_future) != 0) {
    /* a finite deadline was recorded earlier (see GRPC_SEND_DEADLINE in
       call_op): synthesize a deadline op for the child stack */
    grpc_call_op dop;
    dop.type = GRPC_SEND_DEADLINE;
    dop.dir = GRPC_CALL_DOWN;
    dop.flags = 0;
    dop.data.deadline = calld->deadline;
    dop.done_cb = do_nothing; /* fire-and-forget: no completion needed */
    dop.user_data = NULL;
    child_elem->filter->call_op(child_elem, elem, &dop);
  }

  /* continue the start call down the stack, this needs to happen after
     metadata are flushed */
  child_elem->filter->call_op(child_elem, elem, op);
}
/* Forward the first transport op of a newly activated call to the top of
   the child call's stack. The call must already be in CALL_ACTIVE state. */
static void complete_activate(grpc_call_element *elem, grpc_transport_op *op) {
  call_data *calld = elem->call_data;
  grpc_call_element *top;

  GPR_ASSERT(calld->state == CALL_ACTIVE);

  /* hand the op on to the child stack; any buffered state has already been
     dealt with by the time the call reaches CALL_ACTIVE */
  top = grpc_child_call_get_top_element(calld->s.active.child_call);
  top->filter->start_transport_op(top, op);
}
/* Per-op entry point for the client channel filter: route each call op by
   its type and by the call's current state. */
static void call_op(grpc_call_element *elem, grpc_call_element *from_elem,
                    grpc_call_op *op) {
  call_data *calld = elem->call_data;
  GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
  GRPC_CALL_LOG_OP(GPR_INFO, elem, op);

  if (op->type == GRPC_SEND_METADATA) {
    /* queue metadata until the call gets bound to a child channel */
    grpc_metadata_buffer_queue(&calld->pending_metadata, op);
  } else if (op->type == GRPC_SEND_DEADLINE) {
    /* remember the deadline; it is replayed when the call activates */
    calld->deadline = op->data.deadline;
    op->done_cb(op->user_data, GRPC_OP_OK);
  } else if (op->type == GRPC_SEND_START) {
    /* filter out the start event to find which child to send on */
    start_rpc(elem, op);
  } else if (op->type == GRPC_CANCEL_OP) {
    cancel_rpc(elem, op);
  } else if (op->type == GRPC_SEND_MESSAGE || op->type == GRPC_SEND_FINISH ||
             op->type == GRPC_REQUEST_DATA) {
    if (calld->state == CALL_ACTIVE) {
      /* forward data-plane ops straight down the child stack */
      grpc_call_element *child_elem =
          grpc_child_call_get_top_element(calld->s.active.child_call);
      child_elem->filter->call_op(child_elem, elem, op);
    } else {
      /* no child stack to carry the op: fail it */
      op->done_cb(op->user_data, GRPC_OP_ERROR);
    }
  } else {
    /* anything else must be travelling up the stack */
    GPR_ASSERT(op->dir == GRPC_CALL_UP);
    grpc_call_next_op(elem, op);
  }
}
/* Transport-op entry point for the client channel filter. Dispatches on
   the call's state under chand->mu; the lock is always dropped before
   calling into other filters or initiating transport setup, to avoid
   re-entrancy problems. */
static void cc_start_transport_op(grpc_call_element *elem,
                                  grpc_transport_op *op) {
  call_data *calld = elem->call_data;
  channel_data *chand = elem->channel_data;
  grpc_call_element *child_elem;
  grpc_transport_op waiting_op;
  GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
  GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
  gpr_mu_lock(&chand->mu);
  switch (calld->state) {
    case CALL_ACTIVE:
      /* bound to a child stack: forward straight down, outside the lock */
      child_elem = grpc_child_call_get_top_element(calld->s.active.child_call);
      gpr_mu_unlock(&chand->mu);
      child_elem->filter->start_transport_op(child_elem, op);
      break;
    case CALL_CREATED:
      if (op->cancel_with_status != GRPC_STATUS_OK) {
        /* cancelled before the call ever started */
        calld->state = CALL_CANCELLED;
        gpr_mu_unlock(&chand->mu);
        handle_op_after_cancellation(elem, op);
      } else {
        calld->state = CALL_WAITING;
        if (chand->active_child) {
          /* channel is connected - use the connected stack */
          if (prepare_activate(elem, chand->active_child)) {
            gpr_mu_unlock(&chand->mu);
            /* activate the request (pass it down) outside the lock */
            complete_activate(elem, op);
          } else {
            gpr_mu_unlock(&chand->mu);
          }
        } else {
          /* check to see if we should initiate a connection (if we're not
             already), but don't do so until outside the lock to avoid
             re-entrancy problems if the callback is immediate */
          int initiate_transport_setup = 0;
          if (!chand->transport_setup_initiated) {
            chand->transport_setup_initiated = 1;
            initiate_transport_setup = 1;
          }
          /* add this call to the waiting set to be resumed once we have a
             child channel stack, growing the waiting set if needed */
          if (chand->waiting_child_count == chand->waiting_child_capacity) {
            chand->waiting_child_capacity =
                GPR_MAX(chand->waiting_child_capacity * 2, 8);
            chand->waiting_children = gpr_realloc(
                chand->waiting_children,
                chand->waiting_child_capacity * sizeof(call_data *));
          }
          /* stash the op so it can be replayed once a channel appears */
          calld->s.waiting_op = *op;
          chand->waiting_children[chand->waiting_child_count++] = calld;
          gpr_mu_unlock(&chand->mu);
          /* finally initiate transport setup if needed */
          if (initiate_transport_setup) {
            grpc_transport_setup_initiate(chand->transport_setup);
          }
        }
      }
      break;
    case CALL_WAITING:
      if (op->cancel_with_status != GRPC_STATUS_OK) {
        /* cancellation while queued: copy the stashed op under the lock,
           then fail both the stashed op and the incoming one outside it */
        waiting_op = calld->s.waiting_op;
        remove_waiting_child(chand, calld);
        calld->state = CALL_CANCELLED;
        gpr_mu_unlock(&chand->mu);
        handle_op_after_cancellation(elem, &waiting_op);
        handle_op_after_cancellation(elem, op);
      } else {
        /* merge the incoming op into the stashed one: exactly one of the
           two may carry a send batch, and exactly one a recv batch */
        GPR_ASSERT((calld->s.waiting_op.send_ops == NULL) !=
                   (op->send_ops == NULL));
        GPR_ASSERT((calld->s.waiting_op.recv_ops == NULL) !=
                   (op->recv_ops == NULL));
        if (op->send_ops) {
          calld->s.waiting_op.send_ops = op->send_ops;
          calld->s.waiting_op.is_last_send = op->is_last_send;
          calld->s.waiting_op.on_done_send = op->on_done_send;
          calld->s.waiting_op.send_user_data = op->send_user_data;
        }
        if (op->recv_ops) {
          calld->s.waiting_op.recv_ops = op->recv_ops;
          calld->s.waiting_op.recv_state = op->recv_state;
          calld->s.waiting_op.on_done_recv = op->on_done_recv;
          calld->s.waiting_op.recv_user_data = op->recv_user_data;
        }
        gpr_mu_unlock(&chand->mu);
      }
      break;
    case CALL_CANCELLED:
      /* already cancelled: complete the op with the cancellation status */
      gpr_mu_unlock(&chand->mu);
      handle_op_after_cancellation(elem, op);
      break;
  }
}