Example #1
static void read_state_machine(grpc_exec_ctx *exec_ctx, void *arg,
                               grpc_error *error) {
  // This function gets called when there are contents in the unprocessed
  // reads. Take what we want based on the ops wanted, schedule the
  // appropriate closures, and then return to the reads_needed state if
  // still needed.

  // Since this is a closure directly invoked by the combiner, it should not
  // unref the error parameter explicitly; the combiner will do that implicitly
  grpc_error *new_err = GRPC_ERROR_NONE;

  bool needs_close = false;

  INPROC_LOG(GPR_DEBUG, "read_state_machine %p", arg);
  inproc_stream *s = (inproc_stream *)arg;
  gpr_mu *mu = &s->t->mu->mu;  // keep aside in case s gets closed
  gpr_mu_lock(mu);
  s->read_closure_scheduled = false;
  // cancellation takes precedence
  if (s->cancel_self_error != GRPC_ERROR_NONE) {
    fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(s->cancel_self_error));
    goto done;
  } else if (s->cancel_other_error != GRPC_ERROR_NONE) {
    fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(s->cancel_other_error));
    goto done;
  } else if (error != GRPC_ERROR_NONE) {
    fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(error));
    goto done;
  }

  if (s->recv_initial_md_op) {
    if (!s->to_read_initial_md_filled) {
      // We entered the state machine on some other kind of read even though
      // we still haven't satisfied initial md. That's an error.
      new_err =
          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Unexpected frame sequencing");
      INPROC_LOG(GPR_DEBUG,
                 "read_state_machine %p scheduling on_complete errors for no "
                 "initial md %p",
                 s, new_err);
      fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(new_err));
      goto done;
    } else if (s->initial_md_recvd) {
      new_err =
          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Already recvd initial md");
      INPROC_LOG(
          GPR_DEBUG,
          "read_state_machine %p scheduling on_complete errors for already "
          "recvd initial md %p",
          s, new_err);
      fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(new_err));
      goto done;
    }

    s->initial_md_recvd = true;
    new_err = fill_in_metadata(
        exec_ctx, s, &s->to_read_initial_md, s->to_read_initial_md_flags,
        s->recv_initial_md_op->payload->recv_initial_metadata
            .recv_initial_metadata,
        s->recv_initial_md_op->payload->recv_initial_metadata.recv_flags, NULL);
    s->recv_initial_md_op->payload->recv_initial_metadata.recv_initial_metadata
        ->deadline = s->deadline;
    grpc_metadata_batch_clear(exec_ctx, &s->to_read_initial_md);
    s->to_read_initial_md_filled = false;
    INPROC_LOG(GPR_DEBUG,
               "read_state_machine %p scheduling initial-metadata-ready %p", s,
               new_err);
    GRPC_CLOSURE_SCHED(exec_ctx,
                       s->recv_initial_md_op->payload->recv_initial_metadata
                           .recv_initial_metadata_ready,
                       GRPC_ERROR_REF(new_err));
    if ((s->recv_initial_md_op != s->recv_message_op) &&
        (s->recv_initial_md_op != s->recv_trailing_md_op)) {
      INPROC_LOG(
          GPR_DEBUG,
          "read_state_machine %p scheduling initial-metadata-on-complete %p", s,
          new_err);
      GRPC_CLOSURE_SCHED(exec_ctx, s->recv_initial_md_op->on_complete,
                         GRPC_ERROR_REF(new_err));
    }
    s->recv_initial_md_op = NULL;

    if (new_err != GRPC_ERROR_NONE) {
      INPROC_LOG(GPR_DEBUG,
                 "read_state_machine %p scheduling on_complete errors2 %p", s,
                 new_err);
      fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(new_err));
      goto done;
    }
  }
  if (s->to_read_initial_md_filled) {
    new_err = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Unexpected recv frame");
    fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(new_err));
    goto done;
  }
  if (!slice_buffer_list_empty(&s->to_read_message) && s->recv_message_op) {
    inproc_slice_byte_stream_init(
        &s->recv_message_stream,
        slice_buffer_list_pophead(&s->to_read_message));
    *s->recv_message_op->payload->recv_message.recv_message =
        &s->recv_message_stream.base;
    INPROC_LOG(GPR_DEBUG, "read_state_machine %p scheduling message-ready", s);
    GRPC_CLOSURE_SCHED(
        exec_ctx, s->recv_message_op->payload->recv_message.recv_message_ready,
        GRPC_ERROR_NONE);
    if (s->recv_message_op != s->recv_trailing_md_op) {
      INPROC_LOG(GPR_DEBUG,
                 "read_state_machine %p scheduling message-on-complete %p", s,
                 new_err);
      GRPC_CLOSURE_SCHED(exec_ctx, s->recv_message_op->on_complete,
                         GRPC_ERROR_REF(new_err));
    }
    s->recv_message_op = NULL;
  }
  if (s->to_read_trailing_md_filled) {
    if (s->trailing_md_recvd) {
      new_err =
          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Already recvd trailing md");
      INPROC_LOG(
          GPR_DEBUG,
          "read_state_machine %p scheduling on_complete errors for already "
          "recvd trailing md %p",
          s, new_err);
      fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(new_err));
      goto done;
    }
    if (s->recv_message_op != NULL) {
      // This message needs to be wrapped up because it will never be
      // satisfied
      INPROC_LOG(GPR_DEBUG, "read_state_machine %p scheduling message-ready",
                 s);
      GRPC_CLOSURE_SCHED(
          exec_ctx,
          s->recv_message_op->payload->recv_message.recv_message_ready,
          GRPC_ERROR_NONE);
      if (s->recv_message_op != s->recv_trailing_md_op) {
        INPROC_LOG(GPR_DEBUG,
                   "read_state_machine %p scheduling message-on-complete %p", s,
                   new_err);
        GRPC_CLOSURE_SCHED(exec_ctx, s->recv_message_op->on_complete,
                           GRPC_ERROR_REF(new_err));
      }
      s->recv_message_op = NULL;
    }
    if (s->recv_trailing_md_op != NULL) {
      // We wanted trailing metadata and we got it
      s->trailing_md_recvd = true;
      new_err =
          fill_in_metadata(exec_ctx, s, &s->to_read_trailing_md, 0,
                           s->recv_trailing_md_op->payload
                               ->recv_trailing_metadata.recv_trailing_metadata,
                           NULL, NULL);
      grpc_metadata_batch_clear(exec_ctx, &s->to_read_trailing_md);
      s->to_read_trailing_md_filled = false;

      // We should schedule the recv_trailing_md_op completion if
      // 1. this stream is the client-side
      // 2. this stream is the server-side AND has already sent its trailing md
      //    (If the server hasn't already sent its trailing md, it doesn't have
      //     a final status, so don't mark this op complete)
      if (s->t->is_client || s->trailing_md_sent) {
        INPROC_LOG(
            GPR_DEBUG,
            "read_state_machine %p scheduling trailing-md-on-complete %p", s,
            new_err);
        GRPC_CLOSURE_SCHED(exec_ctx, s->recv_trailing_md_op->on_complete,
                           GRPC_ERROR_REF(new_err));
        s->recv_trailing_md_op = NULL;
        needs_close = true;
      } else {
        INPROC_LOG(GPR_DEBUG,
                   "read_state_machine %p server needs to delay handling "
                   "trailing-md-on-complete %p",
                   s, new_err);
      }
    } else {
      INPROC_LOG(
          GPR_DEBUG,
          "read_state_machine %p has trailing md but not yet waiting for it",
          s);
    }
  }
  if (s->trailing_md_recvd && s->recv_message_op) {
    // No further message will come on this stream, so finish off the
    // recv_message_op
    INPROC_LOG(GPR_DEBUG, "read_state_machine %p scheduling message-ready", s);
    GRPC_CLOSURE_SCHED(
        exec_ctx, s->recv_message_op->payload->recv_message.recv_message_ready,
        GRPC_ERROR_NONE);
    if (s->recv_message_op != s->recv_trailing_md_op) {
      INPROC_LOG(GPR_DEBUG,
                 "read_state_machine %p scheduling message-on-complete %p", s,
                 new_err);
      GRPC_CLOSURE_SCHED(exec_ctx, s->recv_message_op->on_complete,
                         GRPC_ERROR_REF(new_err));
    }
    s->recv_message_op = NULL;
  }
  if (s->recv_message_op || s->recv_trailing_md_op) {
    // Didn't get the item we wanted so we still need to get
    // rescheduled
    INPROC_LOG(GPR_DEBUG, "read_state_machine %p still needs closure %p %p", s,
               s->recv_message_op, s->recv_trailing_md_op);
    s->reads_needed = true;
  }
done:
  if (needs_close) {
    close_other_side_locked(exec_ctx, s, "read_state_machine");
    close_stream_locked(exec_ctx, s);
  }
  gpr_mu_unlock(mu);
  GRPC_ERROR_UNREF(new_err);
}
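
The control flow above funnels every early-exit path through the "done" label, where the lock is released and the locally created error is unreffed exactly once, with cancellation checked before anything else. Below is a minimal standalone sketch of that shape, using pthread_mutex_t in place of gpr_mu and plain flags in place of grpc_error; it illustrates the pattern only and is not gRPC code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Standalone illustration of the single-exit cleanup pattern used by
 * read_state_machine: precedence-ordered checks that jump to "done",
 * one unlock point, one release of locally owned state. Not gRPC code. */
typedef struct {
  pthread_mutex_t mu;
  bool cancelled;     /* stands in for cancel_self_error / cancel_other_error */
  bool have_initial;  /* stands in for to_read_initial_md_filled */
} fake_stream;

static void state_machine(fake_stream *s) {
  bool needs_close = false;
  pthread_mutex_lock(&s->mu);
  if (s->cancelled) {
    /* cancellation takes precedence over any pending reads */
    goto done;
  }
  if (!s->have_initial) {
    /* nothing readable yet; the real code would set reads_needed here */
    goto done;
  }
  /* ... consume initial metadata, messages, trailing metadata ... */
  needs_close = true;
done:
  if (needs_close) {
    printf("closing stream\n");
  }
  pthread_mutex_unlock(&s->mu);  /* single unlock point, as in the original */
}

int main(void) {
  fake_stream s;
  pthread_mutex_init(&s.mu, NULL);
  s.cancelled = false;
  s.have_initial = true;
  state_machine(&s);
  pthread_mutex_destroy(&s.mu);
  return 0;
}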
Example #2
File: call.c  Project: robottomw/grpc
static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
                                        grpc_call *call, const grpc_op *ops,
                                        size_t nops, void *notify_tag,
                                        int is_notify_tag_closure) {
  grpc_transport_stream_op stream_op;
  size_t i;
  const grpc_op *op;
  batch_control *bctl;
  int num_completion_callbacks_needed = 1;
  grpc_call_error error = GRPC_CALL_OK;

  GPR_TIMER_BEGIN("grpc_call_start_batch", 0);

  GRPC_CALL_LOG_BATCH(GPR_INFO, call, ops, nops, notify_tag);

  memset(&stream_op, 0, sizeof(stream_op));

  /* TODO(ctiller): this feels like it could be made lock-free */
  gpr_mu_lock(&call->mu);
  bctl = allocate_batch_control(call);
  memset(bctl, 0, sizeof(*bctl));
  bctl->call = call;
  bctl->notify_tag = notify_tag;
  bctl->is_notify_tag_closure = (gpr_uint8)(is_notify_tag_closure != 0);

  if (nops == 0) {
    GRPC_CALL_INTERNAL_REF(call, "completion");
    bctl->success = 1;
    if (!is_notify_tag_closure) {
      grpc_cq_begin_op(call->cq);
    }
    gpr_mu_unlock(&call->mu);
    post_batch_completion(exec_ctx, bctl);
    return GRPC_CALL_OK;
  }

  /* rewrite batch ops into a transport op */
  for (i = 0; i < nops; i++) {
    op = &ops[i];
    if (op->reserved != NULL) {
      error = GRPC_CALL_ERROR;
      goto done_with_error;
    }
    switch (op->op) {
      case GRPC_OP_SEND_INITIAL_METADATA:
        /* Flag validation: currently allow no flags */
        if (op->flags != 0) {
          error = GRPC_CALL_ERROR_INVALID_FLAGS;
          goto done_with_error;
        }
        if (call->sent_initial_metadata) {
          error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
          goto done_with_error;
        }
        if (op->data.send_initial_metadata.count > INT_MAX) {
          error = GRPC_CALL_ERROR_INVALID_METADATA;
          goto done_with_error;
        }
        bctl->send_initial_metadata = 1;
        call->sent_initial_metadata = 1;
        if (!prepare_application_metadata(
                call, (int)op->data.send_initial_metadata.count,
                op->data.send_initial_metadata.metadata, 0, call->is_client)) {
          error = GRPC_CALL_ERROR_INVALID_METADATA;
          goto done_with_error;
        }
        /* TODO(ctiller): just make these the same variable? */
        call->metadata_batch[0][0].deadline = call->send_deadline;
        stream_op.send_initial_metadata =
            &call->metadata_batch[0 /* is_receiving */][0 /* is_trailing */];
        break;
      case GRPC_OP_SEND_MESSAGE:
        if (!are_write_flags_valid(op->flags)) {
          error = GRPC_CALL_ERROR_INVALID_FLAGS;
          goto done_with_error;
        }
        if (op->data.send_message == NULL) {
          error = GRPC_CALL_ERROR_INVALID_MESSAGE;
          goto done_with_error;
        }
        if (call->sending_message) {
          error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
          goto done_with_error;
        }
        bctl->send_message = 1;
        call->sending_message = 1;
        grpc_slice_buffer_stream_init(
            &call->sending_stream,
            &op->data.send_message->data.raw.slice_buffer, op->flags);
        stream_op.send_message = &call->sending_stream.base;
        break;
      case GRPC_OP_SEND_CLOSE_FROM_CLIENT:
        /* Flag validation: currently allow no flags */
        if (op->flags != 0) {
          error = GRPC_CALL_ERROR_INVALID_FLAGS;
          goto done_with_error;
        }
        if (!call->is_client) {
          error = GRPC_CALL_ERROR_NOT_ON_SERVER;
          goto done_with_error;
        }
        if (call->sent_final_op) {
          error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
          goto done_with_error;
        }
        bctl->send_final_op = 1;
        call->sent_final_op = 1;
        stream_op.send_trailing_metadata =
            &call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */];
        break;
      case GRPC_OP_SEND_STATUS_FROM_SERVER:
        /* Flag validation: currently allow no flags */
        if (op->flags != 0) {
          error = GRPC_CALL_ERROR_INVALID_FLAGS;
          goto done_with_error;
        }
        if (call->is_client) {
          error = GRPC_CALL_ERROR_NOT_ON_CLIENT;
          goto done_with_error;
        }
        if (call->sent_final_op) {
          error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
          goto done_with_error;
        }
        if (op->data.send_status_from_server.trailing_metadata_count >
            INT_MAX) {
          error = GRPC_CALL_ERROR_INVALID_METADATA;
          goto done_with_error;
        }
        bctl->send_final_op = 1;
        call->sent_final_op = 1;
        call->send_extra_metadata_count = 1;
        call->send_extra_metadata[0].md = grpc_channel_get_reffed_status_elem(
            call->channel, op->data.send_status_from_server.status);
        if (op->data.send_status_from_server.status_details != NULL) {
          call->send_extra_metadata[1].md = grpc_mdelem_from_metadata_strings(
              call->metadata_context,
              GRPC_MDSTR_REF(grpc_channel_get_message_string(call->channel)),
              grpc_mdstr_from_string(
                  call->metadata_context,
                  op->data.send_status_from_server.status_details));
          call->send_extra_metadata_count++;
          set_status_details(
              call, STATUS_FROM_API_OVERRIDE,
              GRPC_MDSTR_REF(call->send_extra_metadata[1].md->value));
        }
        set_status_code(call, STATUS_FROM_API_OVERRIDE,
                        (gpr_uint32)op->data.send_status_from_server.status);
        if (!prepare_application_metadata(
                call,
                (int)op->data.send_status_from_server.trailing_metadata_count,
                op->data.send_status_from_server.trailing_metadata, 1, 1)) {
          error = GRPC_CALL_ERROR_INVALID_METADATA;
          goto done_with_error;
        }
        stream_op.send_trailing_metadata =
            &call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */];
        break;
      case GRPC_OP_RECV_INITIAL_METADATA:
        /* Flag validation: currently allow no flags */
        if (op->flags != 0) {
          error = GRPC_CALL_ERROR_INVALID_FLAGS;
          goto done_with_error;
        }
        if (call->received_initial_metadata) {
          error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
          goto done_with_error;
        }
        call->received_initial_metadata = 1;
        call->buffered_metadata[0] = op->data.recv_initial_metadata;
        bctl->recv_initial_metadata = 1;
        stream_op.recv_initial_metadata =
            &call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */];
        break;
      case GRPC_OP_RECV_MESSAGE:
        /* Flag validation: currently allow no flags */
        if (op->flags != 0) {
          error = GRPC_CALL_ERROR_INVALID_FLAGS;
          goto done_with_error;
        }
        if (call->receiving_message) {
          error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
          goto done_with_error;
        }
        call->receiving_message = 1;
        bctl->recv_message = 1;
        call->receiving_buffer = op->data.recv_message;
        stream_op.recv_message = &call->receiving_stream;
        grpc_closure_init(&call->receiving_stream_ready, receiving_stream_ready,
                          bctl);
        stream_op.recv_message_ready = &call->receiving_stream_ready;
        num_completion_callbacks_needed++;
        break;
      case GRPC_OP_RECV_STATUS_ON_CLIENT:
        /* Flag validation: currently allow no flags */
        if (op->flags != 0) {
          error = GRPC_CALL_ERROR_INVALID_FLAGS;
          goto done_with_error;
        }
        if (!call->is_client) {
          error = GRPC_CALL_ERROR_NOT_ON_SERVER;
          goto done_with_error;
        }
        if (call->received_final_op) {
          error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
          goto done_with_error;
        }
        call->received_final_op = 1;
        call->buffered_metadata[1] =
            op->data.recv_status_on_client.trailing_metadata;
        call->final_op.client.status = op->data.recv_status_on_client.status;
        call->final_op.client.status_details =
            op->data.recv_status_on_client.status_details;
        call->final_op.client.status_details_capacity =
            op->data.recv_status_on_client.status_details_capacity;
        bctl->recv_final_op = 1;
        stream_op.recv_trailing_metadata =
            &call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */];
        break;
      case GRPC_OP_RECV_CLOSE_ON_SERVER:
        /* Flag validation: currently allow no flags */
        if (op->flags != 0) {
          error = GRPC_CALL_ERROR_INVALID_FLAGS;
          goto done_with_error;
        }
        if (call->is_client) {
          error = GRPC_CALL_ERROR_NOT_ON_CLIENT;
          goto done_with_error;
        }
        if (call->received_final_op) {
          error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
          goto done_with_error;
        }
        call->received_final_op = 1;
        call->final_op.server.cancelled =
            op->data.recv_close_on_server.cancelled;
        bctl->recv_final_op = 1;
        stream_op.recv_trailing_metadata =
            &call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */];
        break;
    }
  }

  GRPC_CALL_INTERNAL_REF(call, "completion");
  if (!is_notify_tag_closure) {
    grpc_cq_begin_op(call->cq);
  }
  gpr_ref_init(&bctl->steps_to_complete, num_completion_callbacks_needed);

  stream_op.context = call->context;
  grpc_closure_init(&bctl->finish_batch, finish_batch, bctl);
  stream_op.on_complete = &bctl->finish_batch;
  gpr_mu_unlock(&call->mu);

  execute_op(exec_ctx, call, &stream_op);

done:
  GPR_TIMER_END("grpc_call_start_batch", 0);
  return error;

done_with_error:
  /* reverse any mutations that occurred */
  if (bctl->send_initial_metadata) {
    call->sent_initial_metadata = 0;
    grpc_metadata_batch_clear(&call->metadata_batch[0][0]);
  }
  if (bctl->send_message) {
    call->sending_message = 0;
    grpc_byte_stream_destroy(&call->sending_stream.base);
  }
  if (bctl->send_final_op) {
    call->sent_final_op = 0;
    grpc_metadata_batch_clear(&call->metadata_batch[0][1]);
  }
  if (bctl->recv_initial_metadata) {
    call->received_initial_metadata = 0;
  }
  if (bctl->recv_message) {
    call->receiving_message = 0;
  }
  if (bctl->recv_final_op) {
    call->received_final_op = 0;
  }
  gpr_mu_unlock(&call->mu);
  goto done;
}
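
call_start_batch is the locked core behind the public grpc_call_start_batch entry point (the GPR_TIMER label above uses that name). As a rough caller-side sketch, the snippet below fills a grpc_op array with ops of the kinds validated above, leaving flags at 0 and reserved at NULL so the checks pass; the field names follow the version of the grpc_op union used in this call.c, and the surrounding call/tag/metadata setup is assumed to already exist.

#include <string.h>
#include <grpc/grpc.h>

/* Hypothetical caller of the batch API shown above (not from call.c).
 * Sends empty initial metadata and asks for initial metadata plus the
 * client-side status, then submits the batch against a completion tag. */
static grpc_call_error start_client_batch(grpc_call *call, void *tag,
                                          grpc_metadata_array *initial_md,
                                          grpc_metadata_array *trailing_md,
                                          grpc_status_code *status,
                                          char **details,
                                          size_t *details_capacity) {
  grpc_op ops[3];
  memset(ops, 0, sizeof(ops));  /* leaves flags == 0 and reserved == NULL */

  ops[0].op = GRPC_OP_SEND_INITIAL_METADATA;
  ops[0].data.send_initial_metadata.count = 0;

  ops[1].op = GRPC_OP_RECV_INITIAL_METADATA;
  ops[1].data.recv_initial_metadata = initial_md;

  ops[2].op = GRPC_OP_RECV_STATUS_ON_CLIENT;
  ops[2].data.recv_status_on_client.trailing_metadata = trailing_md;
  ops[2].data.recv_status_on_client.status = status;
  ops[2].data.recv_status_on_client.status_details = details;
  ops[2].data.recv_status_on_client.status_details_capacity = details_capacity;

  /* Public wrapper around call_start_batch; the final argument is reserved. */
  return grpc_call_start_batch(call, ops, sizeof(ops) / sizeof(ops[0]), tag,
                               NULL);
}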
Example #3
static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
                       grpc_stream *gs, grpc_stream_refcount *refcount,
                       const void *server_data, gpr_arena *arena) {
  INPROC_LOG(GPR_DEBUG, "init_stream %p %p %p", gt, gs, server_data);
  inproc_transport *t = (inproc_transport *)gt;
  inproc_stream *s = (inproc_stream *)gs;
  s->arena = arena;

  s->refs = refcount;
  // Ref this stream right now
  ref_stream(s, "inproc_init_stream:init");

  grpc_metadata_batch_init(&s->to_read_initial_md);
  s->to_read_initial_md_flags = 0;
  s->to_read_initial_md_filled = false;
  grpc_metadata_batch_init(&s->to_read_trailing_md);
  s->to_read_trailing_md_filled = false;
  grpc_metadata_batch_init(&s->write_buffer_initial_md);
  s->write_buffer_initial_md_flags = 0;
  s->write_buffer_initial_md_filled = false;
  grpc_metadata_batch_init(&s->write_buffer_trailing_md);
  s->write_buffer_trailing_md_filled = false;
  slice_buffer_list_init(&s->to_read_message);
  slice_buffer_list_init(&s->write_buffer_message);
  s->reads_needed = false;
  s->read_closure_scheduled = false;
  GRPC_CLOSURE_INIT(&s->read_closure, read_state_machine, s,
                    grpc_schedule_on_exec_ctx);
  s->t = t;
  s->closure_at_destroy = NULL;
  s->other_side_closed = false;

  s->initial_md_sent = s->trailing_md_sent = s->initial_md_recvd =
      s->trailing_md_recvd = false;

  s->closed = false;

  s->cancel_self_error = GRPC_ERROR_NONE;
  s->cancel_other_error = GRPC_ERROR_NONE;
  s->write_buffer_cancel_error = GRPC_ERROR_NONE;
  s->deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
  s->write_buffer_deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);

  s->stream_list_prev = NULL;
  gpr_mu_lock(&t->mu->mu);
  s->listed = true;
  ref_stream(s, "inproc_init_stream:list");
  s->stream_list_next = t->stream_list;
  if (t->stream_list) {
    t->stream_list->stream_list_prev = s;
  }
  t->stream_list = s;
  gpr_mu_unlock(&t->mu->mu);

  if (!server_data) {
    ref_transport(t);
    inproc_transport *st = t->other_side;
    ref_transport(st);
    s->other_side = NULL;  // will get filled in soon
    // Pass the client-side stream address to the server-side for a ref
    ref_stream(s, "inproc_init_stream:clt");  // ref it now on behalf of server
                                              // side to avoid destruction
    INPROC_LOG(GPR_DEBUG, "calling accept stream cb %p %p",
               st->accept_stream_cb, st->accept_stream_data);
    (*st->accept_stream_cb)(exec_ctx, st->accept_stream_data, &st->base,
                            (void *)s);
  } else {
    // This is the server-side and is being called through accept_stream_cb
    inproc_stream *cs = (inproc_stream *)server_data;
    s->other_side = cs;
    // Ref the server-side stream on behalf of the client now
    ref_stream(s, "inproc_init_stream:srv");

    // Now we are about to affect the other side, so lock the transport
    // to make sure that it doesn't get destroyed
    gpr_mu_lock(&s->t->mu->mu);
    cs->other_side = s;
    // Now transfer from the other side's write_buffer if any to the to_read
    // buffer
    if (cs->write_buffer_initial_md_filled) {
      fill_in_metadata(exec_ctx, s, &cs->write_buffer_initial_md,
                       cs->write_buffer_initial_md_flags,
                       &s->to_read_initial_md, &s->to_read_initial_md_flags,
                       &s->to_read_initial_md_filled);
      s->deadline = gpr_time_min(s->deadline, cs->write_buffer_deadline);
      grpc_metadata_batch_clear(exec_ctx, &cs->write_buffer_initial_md);
      cs->write_buffer_initial_md_filled = false;
    }
    while (!slice_buffer_list_empty(&cs->write_buffer_message)) {
      slice_buffer_list_append_entry(
          &s->to_read_message,
          slice_buffer_list_pophead(&cs->write_buffer_message));
    }
    if (cs->write_buffer_trailing_md_filled) {
      fill_in_metadata(exec_ctx, s, &cs->write_buffer_trailing_md, 0,
                       &s->to_read_trailing_md, NULL,
                       &s->to_read_trailing_md_filled);
      grpc_metadata_batch_clear(exec_ctx, &cs->write_buffer_trailing_md);
      cs->write_buffer_trailing_md_filled = false;
    }
    if (cs->write_buffer_cancel_error != GRPC_ERROR_NONE) {
      s->cancel_other_error = cs->write_buffer_cancel_error;
      cs->write_buffer_cancel_error = GRPC_ERROR_NONE;
    }

    gpr_mu_unlock(&s->t->mu->mu);
  }
  return 0;  // return value is not important
}
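
One detail worth noting in init_stream is how the new stream is registered: it is prepended to the transport's intrusive doubly linked stream_list while t->mu->mu is held (set stream_list_next to the old head, point the old head's stream_list_prev back at the new stream, then move the head). The standalone sketch below shows just that prepend, with plain structs in place of inproc_stream and inproc_transport; locking is omitted since the point is the pointer manipulation.

#include <stddef.h>
#include <stdio.h>

/* Standalone illustration of the intrusive doubly linked list prepend that
 * init_stream performs on t->stream_list (the real code holds t->mu->mu
 * around the insertion). Not gRPC code. */
typedef struct node {
  int id;
  struct node *prev;
  struct node *next;
} node;

static void list_prepend(node **head, node *n) {
  n->prev = NULL;
  n->next = *head;      /* s->stream_list_next = t->stream_list;           */
  if (*head != NULL) {
    (*head)->prev = n;  /* t->stream_list->stream_list_prev = s;           */
  }
  *head = n;            /* t->stream_list = s;                             */
}

int main(void) {
  node a = {1, NULL, NULL}, b = {2, NULL, NULL};
  node *head = NULL;
  list_prepend(&head, &a);
  list_prepend(&head, &b);
  for (node *p = head; p != NULL; p = p->next) {
    printf("%d\n", p->id);  /* prints 2 then 1: newest stream is the head */
  }
  return 0;
}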