Example #1
#ifdef GRPC_CALL_REF_COUNT_DEBUG
void grpc_call_internal_unref(grpc_call *c, const char *reason,
                              int allow_immediate_deletion) {
  gpr_log(GPR_DEBUG, "CALL: unref %p %d -> %d [%s]", c,
          c->internal_refcount.count, c->internal_refcount.count - 1, reason);
#else
void grpc_call_internal_unref(grpc_call *c, int allow_immediate_deletion) {
#endif
  if (gpr_unref(&c->internal_refcount)) {
    if (allow_immediate_deletion) {
      destroy_call(c, 1);
    } else {
      grpc_iomgr_add_callback(destroy_call, c);
    }
  }
}

static void set_status_code(grpc_call *call, status_source source,
                            gpr_uint32 status) {
  call->status[source].is_set = 1;
  call->status[source].code = status;

  if (status != GRPC_STATUS_OK && !grpc_bbq_empty(&call->incoming_queue)) {
    grpc_bbq_flush(&call->incoming_queue);
  }
}
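The unref path above relies on gpr_unref() returning a true value only for the caller that drops the last reference. A minimal sketch of that contract, using C11 atomics in place of gRPC's gpr_refcount (the object type and names here are illustrative, not gRPC API):

#include <stdatomic.h>
#include <stdlib.h>

typedef struct object {
  atomic_int refcount;
} object;

/* Mirrors the gpr_unref() contract: returns nonzero only when this
   call dropped the final reference. */
static int object_unref(object *o) {
  return atomic_fetch_sub(&o->refcount, 1) == 1;
}

static void object_destroy(object *o) { free(o); }

int main(void) {
  object *o = malloc(sizeof *o);
  atomic_init(&o->refcount, 2);
  if (object_unref(o)) object_destroy(o); /* count 2 -> 1: not destroyed */
  if (object_unref(o)) object_destroy(o); /* count 1 -> 0: destroyed */
  return 0;
}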
Example #2
/* Releases the C-level resources associated with a call.
   Once a call has been closed, no further requests can be
   processed.
*/
static VALUE grpc_rb_call_close(VALUE self) {
  grpc_rb_call *call = NULL;
  TypedData_Get_Struct(self, grpc_rb_call, &grpc_call_data_type, call);
  if (call != NULL) {
    destroy_call(call);
    RTYPEDDATA_DATA(self) = NULL;
  }
  return Qnil;
}
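Setting RTYPEDDATA_DATA(self) to NULL after destroy_call makes the close idempotent: a second close, or the GC free function shown in Example #6, sees NULL and does nothing. The same guard in a standalone sketch, with hypothetical names:

#include <stdlib.h>

typedef struct handle { void *resource; } handle;

/* Free the resource exactly once; NULL the pointer so any further
   close (or a GC finalizer) is a harmless no-op. */
static void handle_close(handle *h) {
  if (h != NULL && h->resource != NULL) {
    free(h->resource);
    h->resource = NULL;
  }
}

int main(void) {
  handle h = { malloc(16) };
  handle_close(&h); /* frees the resource */
  handle_close(&h); /* no-op */
  return 0;
}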
Example #3
void grpc_call_internal_unref(grpc_call *c, int allow_immediate_deletion) {
  if (gpr_unref(&c->internal_refcount)) {
    if (allow_immediate_deletion) {
      destroy_call(c, 1);
    } else {
      grpc_iomgr_add_callback(destroy_call, c);
    }
  }
}
Example #4
static void call_on_state_changed( pjsip_inv_session *inv, 
				   pjsip_event *e)
{
    call_t *call = (call_t*)inv->mod_data[mod_sipecho.id];
    if (!call)
	return;

    PJ_UNUSED_ARG(e);
    if (inv->state == PJSIP_INV_STATE_DISCONNECTED) {
	PJ_LOG(3,(THIS_FILE, "Call %d: DISCONNECTED [reason=%d (%s)]",
		  call - app.call, inv->cause,
		  pjsip_get_status_text(inv->cause)->ptr));
	destroy_call(call);
    } else {
	PJ_LOG(3,(THIS_FILE, "Call %d: state changed to %s",
		  call - app.call, pjsip_inv_state_name(inv->state)));
    }
}
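The callback recovers its per-call context from inv->mod_data[mod_sipecho.id], pjsip's per-module user-data slot, and then derives the call index by pointer arithmetic against the app.call array. A simplified sketch of that idiom (the types and module id below are made up for illustration, not pjsip API):

#include <stdio.h>

#define MAX_MODULES 8

struct session { void *mod_data[MAX_MODULES]; }; /* stand-in for pjsip_inv_session */
struct my_call { int in_use; };

static struct my_call calls[4];
static const int my_mod_id = 3; /* in pjsip this id is assigned at module registration */

int main(void) {
  struct session s = { {0} };
  s.mod_data[my_mod_id] = &calls[2];         /* attach our context */
  struct my_call *c = s.mod_data[my_mod_id]; /* recover it inside a callback */
  if (c) printf("call index %d\n", (int)(c - calls)); /* prints 2 */
  return 0;
}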
Example #5
void destroy_tunnel (struct tunnel *t)
{
    /*
     * Immediately destroy a tunnel (and all its calls)
     * and free its resources.  This may be called
     * by the tunnel itself, so it needs to be
     * "suicide safe"
     */

    struct call *c, *me;
    struct tunnel *p;
    struct timeval tv;
    if (!t)
        return;

    /*
     * Save ourselves until the very
     * end, since we might be calling this ourselves.
     * We must divorce ourselves from the tunnel
     * structure, however, to avoid recursion
     * because of the logic of the destroy_call
     */
    me = t->self;

    /*
     * Destroy all the member calls
     */
    c = t->call_head;
    while (c)
    {
        struct call *next = c->next;   /* save before destroy_call frees c */
        destroy_call (c);
        c = next;
    }
    /*
     * Remove ourselves from the list of tunnels
     */

    if (tunnels.head == t)
    {
        tunnels.head = t->next;
        tunnels.count--;
    }
    else
    {
        p = tunnels.head;
        if (p)
        {
            while (p->next && (p->next != t))
                p = p->next;
            if (p->next)
            {
                p->next = t->next;
                tunnels.count--;
            }
            else
            {
                log (LOG_WARN,
                     "%s: unable to locate tunnel in tunnel list\n",
                     __FUNCTION__);
            }
        }
        else
        {
            log (LOG_WARN, "%s: tunnel list is empty!\n", __FUNCTION__);
        }
    }
    if (t->lac)
    {
        t->lac->t = NULL;
        if (t->lac->redial && (t->lac->rtimeout > 0) && !t->lac->rsched &&
            t->lac->active)
        {
            log (LOG_LOG, "Will redial in %d seconds\n",
                 t->lac->rtimeout);
            tv.tv_sec = t->lac->rtimeout;
            tv.tv_usec = 0;
            t->lac->rsched = schedule (tv, magic_lac_dial, t->lac);
        }
    }
    /* XXX L2TP/IPSec: remove relevant SAs here?  NTB 20011010
     * XXX But what if another tunnel is using same SA?
     */
    if (t->lns)
        t->lns->t = NULL;
    if (t->chal_us.challenge)
	free (t->chal_us.challenge);
    if (t->chal_them.challenge)
	free (t->chal_them.challenge);
    /* no free(t->chal_us.vector) is needed here: the memory it points to
       is allocated and freed elsewhere */
    if (t->chal_them.vector)
	free (t->chal_them.vector);
    free (t);
    free (me);
}
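The corrected call-teardown loop above saves c->next before destroy_call frees the node; the same shape applies to any list teardown. A minimal standalone version:

#include <stdlib.h>

struct node { struct node *next; };

/* Free an entire singly linked list. The next pointer must be read
   before free(), or the advance step would touch freed memory. */
static void destroy_list(struct node *head) {
  while (head) {
    struct node *next = head->next;
    free(head);
    head = next;
  }
}

int main(void) {
  struct node *head = NULL;
  for (int i = 0; i < 3; i++) {
    struct node *n = malloc(sizeof *n);
    n->next = head;
    head = n;
  }
  destroy_list(head);
  return 0;
}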
Example #6
/* Destroys a Call. */
static void grpc_rb_call_destroy(void *p) {
  if (p == NULL) {
    return;
  }
  destroy_call((grpc_rb_call *)p);
}
Example #7
void call_close (struct call *c)
{
    struct buffer *buf;
    struct schedule_entry *se, *ose;
    struct call *tmp, *tmp2;
    if (!c || !c->container)
    {
        l2tp_log (LOG_DEBUG, "%s: called on null call or containerless call\n",
             __FUNCTION__);
        return;
    }
    if (c == c->container->self)
    {
        /*
         * We're actually closing the
         * entire tunnel
         */

        /* First deschedule any remaining packet transmissions
           for this tunnel.  That means Hello's and any remaining
           packets scheduled for transmission.  This is a very
           nasty little piece of code here. */

        se = events;
        ose = NULL;
        while (se)
        {
            if ((((struct buffer *) se->data)->tunnel == c->container)
                || ((struct tunnel *) se->data == c->container))
            {
#ifdef DEBUG_CLOSE
                l2tp_log (LOG_DEBUG, "%s: Descheduling event\n", __FUNCTION__);
#endif
                if (ose)
                {
                    ose->next = se->next;
                    if ((struct tunnel *) se->data != c->container)
                        toss ((struct buffer *) (se->data));
                    free (se);
                    se = ose->next;
                }
                else
                {
                    events = se->next;
                    if ((struct tunnel *) se->data != c->container)
                        toss ((struct buffer *) (se->data));
                    free (se);
                    se = events;
                }
            }
            else
            {
                ose = se;
                se = se->next;
            }
        }

        if (c->closing)
        {
            /* Really close this tunnel, as our
               StopCCN has been ack'd */
#ifdef DEBUG_CLOSE
            l2tp_log (LOG_DEBUG, "%s: Actually closing tunnel %d\n", __FUNCTION__,
                 c->container->ourtid);
#endif
#ifdef USE_KERNEL
            if (kernel_support)
                ioctl (server_socket, L2TPIOCDELTUNNEL, c->container->ourtid);
#endif
            destroy_tunnel (c->container);
            return;
        }

        /*
         * We need to close, but need to provide reliable delivery
         * of the final StopCCN. We record our state to know when
         * we have actually received an ACK on our StopCCN.
         */
        c->closeSs = c->container->control_seq_num;
        buf = new_outgoing (c->container);
        add_message_type_avp (buf, StopCCN);
        if (c->container->hbit)
        {
            mk_challenge (c->container->chal_them.vector, VECTOR_SIZE);
            add_randvect_avp (buf, c->container->chal_them.vector,
                              VECTOR_SIZE);
        }
        add_tunnelid_avp (buf, c->container->ourtid);
        if (c->result < 0)
            c->result = RESULT_CLEAR;
        if (c->error < 0)
            c->error = 0;
        add_result_code_avp (buf, c->result, c->error, c->errormsg,
                             strlen (c->errormsg));
        add_control_hdr (c->container, c, buf);
        if (packet_dump)
            do_packet_dump (buf);
#ifdef DEBUG_CLOSE
        l2tp_log (LOG_DEBUG, "%s: enqueing close message for tunnel\n",
             __FUNCTION__);
#endif
        control_xmit (buf);
        /*
         * We also need to stop all traffic on any calls contained
         * within us.
         */
        tmp = c->container->call_head;
        while (tmp)
        {
            tmp2 = tmp->next;
            tmp->needclose = 0;
            tmp->closing = -1;
            call_close (tmp);
            tmp = tmp2;
        }
	/* mf, 16.04.2003: change log message to show tunneltag */
        // l2tp_log (LOG_LOG,
        //      "%s : Connection %d closed to %s, port %d (%s)\n", __FUNCTION__,
        //      c->container->tid,
        //      IPADDY (c->container->peer.sin_addr),
        //      ntohs (c->container->peer.sin_port), c->errormsg);
        l2tp_log (LOG_LOG,
             "%s : Connection closed with peer %s, reason: %s\n",
             __FUNCTION__, c->container->tunneltag, c->errormsg);
    }
    else
    {
        /*
         * Just close a call
         */
#ifdef USE_KERNEL
        struct l2tp_call_opts co;
#endif
        if (c->zlb_xmit)
            deschedule (c->zlb_xmit);
/*		if (c->dethrottle) deschedule(c->dethrottle); */
        if (c->closing)
        {
#ifdef DEBUG_CLOSE
            l2tp_log (LOG_DEBUG, "%s: Actually closing call %d\n", __FUNCTION__,
                 c->ourcid);
#endif
            destroy_call (c);
            return;
        }
#ifdef USE_KERNEL
        if (kernel_support)
        {
            co.ourtid = c->container->ourtid;
            co.ourcid = c->ourcid;
            ioctl (server_socket, L2TPIOCGETCALLOPTS, &co);
            co.flags = co.flags & ~L2TP_FLAG_CALL_UP;
            ioctl (server_socket, L2TPIOCSETCALLOPTS, &co);
        }
#endif
        c->closeSs = c->container->control_seq_num;
        buf = new_outgoing (c->container);
        add_message_type_avp (buf, CDN);
        if (c->container->hbit)
        {
            mk_challenge (c->container->chal_them.vector, VECTOR_SIZE);
            add_randvect_avp (buf, c->container->chal_them.vector,
                              VECTOR_SIZE);
        }
        if (c->result < 0)
            c->result = RESULT_CLEAR;
        if (c->error < 0)
            c->error = 0;
        add_result_code_avp (buf, c->result, c->error, c->errormsg,
                             strlen (c->errormsg));
#ifdef TEST_HIDDEN
        add_callid_avp (buf, c->ourcid, c->container);
#else
        add_callid_avp (buf, c->ourcid);
#endif
        add_control_hdr (c->container, c, buf);
        if (packet_dump)
            do_packet_dump (buf);
#ifdef DEBUG_CLOSE
        l2tp_log (LOG_DEBUG, "%s: enqueuing close message for call %d\n",
             __FUNCTION__, c->ourcid);
#endif
        control_xmit (buf);
        l2tp_log (LOG_LOG, "%s: Call %d to %s disconnected\n", __FUNCTION__,
             c->ourcid, IPADDY (c->container->peer.sin_addr));
    }
    /*
     * Note that we're in the process of closing now
     */
    c->closing = -1;
}
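The descheduling walk at the top of call_close is the classic remove-while-iterating pattern for a singly linked list: ose trails se so a matching node can be unlinked whether it sits at the head or in the middle. The same logic in isolation, with hypothetical types:

#include <stdlib.h>

struct entry { struct entry *next; int key; };

/* Unlink and free every entry whose key matches, keeping a trailing
   prev pointer (the role ose plays above). Returns the new head. */
static struct entry *remove_matching(struct entry *head, int key) {
  struct entry *prev = NULL, *e = head;
  while (e) {
    if (e->key == key) {
      struct entry *dead = e;
      if (prev) { prev->next = e->next; e = prev->next; }
      else      { head = e->next;       e = head;       }
      free(dead);
    } else {
      prev = e;
      e = e->next;
    }
  }
  return head;
}

int main(void) {
  struct entry *head = NULL;
  for (int i = 0; i < 4; i++) {
    struct entry *e = malloc(sizeof *e);
    e->key = i % 2;
    e->next = head;
    head = e;
  }
  head = remove_matching(head, 1); /* drops the two key==1 entries */
  while (head) { struct entry *n = head->next; free(head); head = n; }
  return 0;
}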
Example #8
static pj_bool_t on_rx_request( pjsip_rx_data *rdata )
{
    pj_sockaddr hostaddr;
    char temp[80], hostip[PJ_INET6_ADDRSTRLEN];
    pj_str_t local_uri;
    pjsip_dialog *dlg;
    pjsip_rdata_sdp_info *sdp_info;
    pjmedia_sdp_session *answer = NULL;
    pjsip_tx_data *tdata = NULL;
    call_t *call = NULL;
    unsigned i;
    pj_status_t status;

    PJ_LOG(3,(THIS_FILE, "RX %.*s from %s",
	      (int)rdata->msg_info.msg->line.req.method.name.slen,
	      rdata->msg_info.msg->line.req.method.name.ptr,
	      rdata->pkt_info.src_name));

    if (rdata->msg_info.msg->line.req.method.id == PJSIP_REGISTER_METHOD) {
	/* Let me be a registrar! */
	pjsip_hdr hdr_list, *h;
	pjsip_msg *msg;
	int expires = -1;

	pj_list_init(&hdr_list);
	msg = rdata->msg_info.msg;
	h = (pjsip_hdr*)pjsip_msg_find_hdr(msg, PJSIP_H_EXPIRES, NULL);
	if (h) {
	    expires = ((pjsip_expires_hdr*)h)->ivalue;
	    pj_list_push_back(&hdr_list, pjsip_hdr_clone(rdata->tp_info.pool, h));
	    PJ_LOG(3,(THIS_FILE, " Expires=%d", expires));
	}
	if (expires != 0) {
	    h = (pjsip_hdr*)pjsip_msg_find_hdr(msg, PJSIP_H_CONTACT, NULL);
	    if (h)
		pj_list_push_back(&hdr_list, pjsip_hdr_clone(rdata->tp_info.pool, h));
	}

	pjsip_endpt_respond(app.sip_endpt, &mod_sipecho, rdata, 200, NULL,
	                    &hdr_list, NULL, NULL);
	return PJ_TRUE;
    }

    if (rdata->msg_info.msg->line.req.method.id != PJSIP_INVITE_METHOD) {
	if (rdata->msg_info.msg->line.req.method.id != PJSIP_ACK_METHOD) {
	    pj_str_t reason = pj_str("Go away");
	    pjsip_endpt_respond_stateless( app.sip_endpt, rdata,
					   400, &reason,
					   NULL, NULL);
	}
	return PJ_TRUE;
    }

    sdp_info = pjsip_rdata_get_sdp_info(rdata);
    if (!sdp_info || !sdp_info->sdp) {
	pj_str_t reason = pj_str("Require valid offer");
	pjsip_endpt_respond_stateless( app.sip_endpt, rdata,
				       400, &reason,
				       NULL, NULL);
	return PJ_TRUE;
    }

    for (i=0; i<MAX_CALLS; ++i) {
	if (app.call[i].inv == NULL) {
	    call = &app.call[i];
	    break;
	}
    }

    if (i==MAX_CALLS) {
	pj_str_t reason = pj_str("We're full");
	pjsip_endpt_respond_stateless( app.sip_endpt, rdata,
				       PJSIP_SC_BUSY_HERE, &reason,
				       NULL, NULL);
	return PJ_TRUE;
    }

    /* Generate Contact URI */
    status = pj_gethostip(AF, &hostaddr);
    if (status != PJ_SUCCESS) {
	app_perror(THIS_FILE, "Unable to retrieve local host IP", status);
	return PJ_TRUE;
    }
    pj_sockaddr_print(&hostaddr, hostip, sizeof(hostip), 2);
    pj_ansi_sprintf(temp, "<sip:sipecho@%s:%d>", hostip, SIP_PORT);
    local_uri = pj_str(temp);

    status = pjsip_dlg_create_uas( pjsip_ua_instance(), rdata,
				   &local_uri, &dlg);

    if (status == PJ_SUCCESS)
	answer = create_answer(call-app.call, dlg->pool, sdp_info->sdp);
    if (status == PJ_SUCCESS)
    	status = pjsip_inv_create_uas( dlg, rdata, answer, 0, &call->inv);
    if (status == PJ_SUCCESS)
    	status = pjsip_inv_initial_answer(call->inv, rdata, 100,
				          NULL, NULL, &tdata);
    if (status == PJ_SUCCESS)
    	status = pjsip_inv_send_msg(call->inv, tdata);

    if (status == PJ_SUCCESS)
    	status = pjsip_inv_answer(call->inv, 180, NULL,
    	                          NULL, &tdata);
    if (status == PJ_SUCCESS)
    	status = pjsip_inv_send_msg(call->inv, tdata);

    if (status == PJ_SUCCESS)
    	status = pjsip_inv_answer(call->inv, 200, NULL,
    	                          NULL, &tdata);
    if (status == PJ_SUCCESS)
    	status = pjsip_inv_send_msg(call->inv, tdata);

    if (status != PJ_SUCCESS) {
	pjsip_endpt_respond_stateless( app.sip_endpt, rdata,
				       500, NULL, NULL, NULL);
	destroy_call(call);
    } else {
	call->inv->mod_data[mod_sipecho.id] = call;
    }

    return PJ_TRUE;
}
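on_rx_request answers the INVITE through a chain of guarded calls: each pjsip step runs only while status is still PJ_SUCCESS, so the first failure skips everything down to the single error handler that responds 500 and calls destroy_call. The idiom in miniature (step() and its statuses are illustrative):

#include <stdio.h>

typedef int status_t;
enum { OK = 0, FAIL = -1 };

static status_t step(int n, status_t result) {
  printf("running step %d\n", n);
  return result;
}

/* Cascaded-status idiom: no nested ifs, no goto; one cleanup site. */
int main(void) {
  status_t status = step(1, OK);
  if (status == OK) status = step(2, FAIL); /* fails here */
  if (status == OK) status = step(3, OK);   /* skipped */
  if (status != OK) fprintf(stderr, "failed; cleaning up\n");
  return 0;
}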
Example #9
void call_close (struct call *c)
{
    struct buffer *buf;
    struct schedule_entry *se, *ose;
    struct call *tmp, *tmp2;
    if (!c || !c->container)
    {
        l2tp_log (LOG_DEBUG, "%s: called on null call or containerless call\n",
                  __FUNCTION__);
        return;
    }
    if (c == c->container->self)
    {
        /*
         * We're actually closing the
         * entire tunnel
         */

        /* First deschedule any remaining packet transmissions
           for this tunnel.  That means Hello's and any remaining
           packets scheduled for transmission.  This is a very
           nasty little piece of code here. */

        se = events;
        ose = NULL;
        while (se)
        {
            if ((((struct buffer *) se->data)->tunnel == c->container)
                    || ((struct tunnel *) se->data == c->container))
            {
#ifdef DEBUG_CLOSE
                l2tp_log (LOG_DEBUG, "%s: Descheduling event\n", __FUNCTION__);
#endif
                if (ose)
                {
                    ose->next = se->next;
                    if ((struct tunnel *) se->data != c->container)
                        toss ((struct buffer *) (se->data));
                    free (se);
                    se = ose->next;
                }
                else
                {
                    events = se->next;
                    if ((struct tunnel *) se->data != c->container)
                        toss ((struct buffer *) (se->data));
                    free (se);
                    se = events;
                }
            }
            else
            {
                ose = se;
                se = se->next;
            }
        }

        if (c->closing)
        {
            /* Really close this tunnel, as our
               StopCCN has been ack'd */
#ifdef DEBUG_CLOSE
            l2tp_log (LOG_DEBUG, "%s: Actually closing tunnel %d\n", __FUNCTION__,
                      c->container->ourtid);
#endif
            destroy_tunnel (c->container);
            return;
        }

        /*
         * We need to close, but need to provide reliable delivery
         * of the final StopCCN. We record our state to know when
         * we have actually received an ACK on our StopCCN.
         */
        c->closeSs = c->container->control_seq_num;
        buf = new_outgoing (c->container);
        add_message_type_avp (buf, StopCCN);
        if (c->container->hbit)
        {
            mk_challenge (c->container->chal_them.vector, VECTOR_SIZE);
            add_randvect_avp (buf, c->container->chal_them.vector,
                              VECTOR_SIZE);
        }
        add_tunnelid_avp (buf, c->container->ourtid);
        if (c->result < 0)
            c->result = RESULT_CLEAR;
        if (c->error < 0)
            c->error = 0;
        add_result_code_avp (buf, c->result, c->error, c->errormsg,
                             strlen (c->errormsg));
        add_control_hdr (c->container, c, buf);
        if (gconfig.packet_dump)
            do_packet_dump (buf);
#ifdef DEBUG_CLOSE
        l2tp_log (LOG_DEBUG, "%s: enqueuing close message for tunnel\n",
                  __FUNCTION__);
#endif
        control_xmit (buf);
        /*
         * We also need to stop all traffic on any calls contained
         * within us.
         */
        tmp = c->container->call_head;
        while (tmp)
        {
            tmp2 = tmp->next;
            tmp->needclose = 0;
            tmp->closing = -1;
            call_close (tmp);
            tmp = tmp2;
        }
        l2tp_log (LOG_DEBUG, "Connection %d closed to %s, port %d (%s)\n",
                  c->container->tid,
                  IPADDY (c->container->peer.sin_addr),
                  ntohs (c->container->peer.sin_port), c->errormsg);

        if (strcmp(c->errormsg, "Server closing"))
        {
            if (!strcmp(c->errormsg, "goodbye!"))
                l2tp_log (LOG_INFO,
                          "Terminated by router connect %s, cause manual disconnect.\n", IPADDY (c->container->peer.sin_addr));
            else if (c->msgtype <= 0 || c->msgtype > 16)
                l2tp_log (LOG_INFO, "Detect %s from %s, port %d\n",
                          c->errormsg, IPADDY (c->container->peer.sin_addr),
                          ntohs (c->container->peer.sin_port));
            else
                l2tp_log (LOG_INFO, "Detect %s %s from %s, port %d\n",
                          msgtypes[c->msgtype], c->errormsg, IPADDY (c->container->peer.sin_addr),
                          ntohs (c->container->peer.sin_port));
        }
        //if( (!strcmp(c->errormsg, "Timeout")) && (c->container->tid != 0) )
        //	l2tp_log(LOG_INFO, "Terminated by router, cause no response to echo-requests.");

    }
    else
    {
        /*
         * Just close a call
         */
        if (c->zlb_xmit)
            deschedule (c->zlb_xmit);
        /*		if (c->dethrottle) deschedule(c->dethrottle); */
        if (c->closing)
        {
#ifdef DEBUG_CLOSE
            l2tp_log (LOG_DEBUG, "%s: Actually closing call %d\n", __FUNCTION__,
                      c->ourcid);
#endif
            destroy_call (c);
            return;
        }
        c->closeSs = c->container->control_seq_num;
        buf = new_outgoing (c->container);
        add_message_type_avp (buf, CDN);
        if (c->container->hbit)
        {
            mk_challenge (c->container->chal_them.vector, VECTOR_SIZE);
            add_randvect_avp (buf, c->container->chal_them.vector,
                              VECTOR_SIZE);
        }
        if (c->result < 0)
            c->result = RESULT_CLEAR;
        if (c->error < 0)
            c->error = 0;
        add_result_code_avp (buf, c->result, c->error, c->errormsg,
                             strlen (c->errormsg));
#ifdef TEST_HIDDEN
        add_callid_avp (buf, c->ourcid, c->container);
#else
        add_callid_avp (buf, c->ourcid);
#endif
        add_control_hdr (c->container, c, buf);
        if (gconfig.packet_dump)
            do_packet_dump (buf);
#ifdef DEBUG_CLOSE
        l2tp_log (LOG_DEBUG, "%s: enqueuing close message for call %d\n",
                  __FUNCTION__, c->ourcid);
#endif
        control_xmit (buf);
        l2tp_log (LOG_DEBUG, "%s: Call %d to %s disconnected\n", __FUNCTION__,
                  c->ourcid, IPADDY (c->container->peer.sin_addr));
    }
    /*
     * Note that we're in the process of closing now
     */
    c->closing = -1;
}
Example #10
int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
  grpc_test_only_set_metadata_hash_seed(0);
  if (squelch) gpr_set_log_function(dont_log);
  input_stream inp = {data, data + size};
  grpc_resolve_address = my_resolve_address;
  grpc_tcp_client_connect_impl = my_tcp_client_connect;
  gpr_now_impl = now_impl;
  grpc_init();

  GPR_ASSERT(g_channel == NULL);
  GPR_ASSERT(g_server == NULL);

  bool server_shutdown = false;
  int pending_server_shutdowns = 0;
  int pending_channel_watches = 0;
  int pending_pings = 0;

  g_active_call = new_call(NULL, ROOT);

  grpc_completion_queue *cq = grpc_completion_queue_create(NULL);

  while (!is_eof(&inp) || g_channel != NULL || g_server != NULL ||
         pending_channel_watches > 0 || pending_pings > 0 ||
         g_active_call->type != ROOT || g_active_call->next != g_active_call) {
    if (is_eof(&inp)) {
      if (g_channel != NULL) {
        grpc_channel_destroy(g_channel);
        g_channel = NULL;
      }
      if (g_server != NULL) {
        if (!server_shutdown) {
          grpc_server_shutdown_and_notify(
              g_server, cq, create_validator(assert_success_and_decrement,
                                             &pending_server_shutdowns));
          server_shutdown = true;
          pending_server_shutdowns++;
        } else if (pending_server_shutdowns == 0) {
          grpc_server_destroy(g_server);
          g_server = NULL;
        }
      }
      call_state *s = g_active_call;
      do {
        if (s->type != PENDING_SERVER && s->call != NULL) {
          s = destroy_call(s);
        } else {
          s = s->next;
        }
      } while (s != g_active_call);

      g_now = gpr_time_add(g_now, gpr_time_from_seconds(1, GPR_TIMESPAN));
    }

    switch (next_byte(&inp)) {
      // terminate on bad bytes
      default:
        end(&inp);
        break;
      // tickle completion queue
      case 0: {
        grpc_event ev = grpc_completion_queue_next(
            cq, gpr_inf_past(GPR_CLOCK_REALTIME), NULL);
        switch (ev.type) {
          case GRPC_OP_COMPLETE: {
            validator *v = ev.tag;
            v->validate(v->arg, ev.success);
            gpr_free(v);
            break;
          }
          case GRPC_QUEUE_TIMEOUT:
            break;
          case GRPC_QUEUE_SHUTDOWN:
            abort();
            break;
        }
        break;
      }
      // increment global time
      case 1: {
        g_now = gpr_time_add(
            g_now, gpr_time_from_micros(read_uint32(&inp), GPR_TIMESPAN));
        break;
      }
      // create an insecure channel
      case 2: {
        if (g_channel == NULL) {
          char *target = read_string(&inp);
          char *target_uri;
          gpr_asprintf(&target_uri, "dns:%s", target);
          grpc_channel_args *args = read_args(&inp);
          g_channel = grpc_insecure_channel_create(target_uri, args, NULL);
          GPR_ASSERT(g_channel != NULL);
          grpc_channel_args_destroy(args);
          gpr_free(target_uri);
          gpr_free(target);
        } else {
          end(&inp);
        }
        break;
      }
      // destroy a channel
      case 3: {
        if (g_channel != NULL) {
          grpc_channel_destroy(g_channel);
          g_channel = NULL;
        } else {
          end(&inp);
        }
        break;
      }
      // bring up a server
      case 4: {
        if (g_server == NULL) {
          grpc_channel_args *args = read_args(&inp);
          g_server = grpc_server_create(args, NULL);
          GPR_ASSERT(g_server != NULL);
          grpc_channel_args_destroy(args);
          grpc_server_register_completion_queue(g_server, cq, NULL);
          grpc_server_start(g_server);
          server_shutdown = false;
          GPR_ASSERT(pending_server_shutdowns == 0);
        } else {
          end(&inp);
        }
        break;
      }
      // begin server shutdown
      case 5: {
        if (g_server != NULL) {
          grpc_server_shutdown_and_notify(
              g_server, cq, create_validator(assert_success_and_decrement,
                                             &pending_server_shutdowns));
          pending_server_shutdowns++;
          server_shutdown = true;
        } else {
          end(&inp);
        }
        break;
      }
      // cancel all calls if shutdown
      case 6: {
        if (g_server != NULL && server_shutdown) {
          grpc_server_cancel_all_calls(g_server);
        } else {
          end(&inp);
        }
        break;
      }
      // destroy server
      case 7: {
        if (g_server != NULL && server_shutdown &&
            pending_server_shutdowns == 0) {
          grpc_server_destroy(g_server);
          g_server = NULL;
        } else {
          end(&inp);
        }
        break;
      }
      // check connectivity
      case 8: {
        if (g_channel != NULL) {
          uint8_t try_to_connect = next_byte(&inp);
          if (try_to_connect == 0 || try_to_connect == 1) {
            grpc_channel_check_connectivity_state(g_channel, try_to_connect);
          } else {
            end(&inp);
          }
        } else {
          end(&inp);
        }
        break;
      }
      // watch connectivity
      case 9: {
        if (g_channel != NULL) {
          grpc_connectivity_state st =
              grpc_channel_check_connectivity_state(g_channel, 0);
          if (st != GRPC_CHANNEL_FATAL_FAILURE) {
            gpr_timespec deadline = gpr_time_add(
                gpr_now(GPR_CLOCK_REALTIME),
                gpr_time_from_micros(read_uint32(&inp), GPR_TIMESPAN));
            grpc_channel_watch_connectivity_state(
                g_channel, st, deadline, cq,
                create_validator(validate_connectivity_watch,
                                 make_connectivity_watch(
                                     deadline, &pending_channel_watches)));
            pending_channel_watches++;
          }
        } else {
          end(&inp);
        }
        break;
      }
      // create a call
      case 10: {
        bool ok = true;
        if (g_channel == NULL) ok = false;
        grpc_call *parent_call = NULL;
        if (g_active_call->type != ROOT) {
          if (g_active_call->call == NULL || g_active_call->type == CLIENT) {
            end(&inp);
            break;
          }
          parent_call = g_active_call->call;
        }
        uint32_t propagation_mask = read_uint32(&inp);
        char *method = read_string(&inp);
        char *host = read_string(&inp);
        gpr_timespec deadline =
            gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                         gpr_time_from_micros(read_uint32(&inp), GPR_TIMESPAN));

        if (ok) {
          call_state *cs = new_call(g_active_call, CLIENT);
          cs->call =
              grpc_channel_create_call(g_channel, parent_call, propagation_mask,
                                       cq, method, host, deadline, NULL);
        } else {
          end(&inp);
        }
        gpr_free(method);
        gpr_free(host);
        break;
      }
      // switch the 'current' call
      case 11: {
        g_active_call = g_active_call->next;
        break;
      }
      // queue some ops on a call
      case 12: {
        if (g_active_call->type == PENDING_SERVER ||
            g_active_call->type == ROOT || g_active_call->call == NULL) {
          end(&inp);
          break;
        }
        size_t num_ops = next_byte(&inp);
        if (num_ops > 6) {
          end(&inp);
          break;
        }
        grpc_op *ops = gpr_malloc(sizeof(grpc_op) * num_ops);
        bool ok = true;
        size_t i;
        grpc_op *op;
        for (i = 0; i < num_ops; i++) {
          op = &ops[i];
          switch (next_byte(&inp)) {
            default:
              /* invalid value */
              op->op = (grpc_op_type)-1;
              ok = false;
              break;
            case GRPC_OP_SEND_INITIAL_METADATA:
              op->op = GRPC_OP_SEND_INITIAL_METADATA;
              read_metadata(&inp, &op->data.send_initial_metadata.count,
                            &op->data.send_initial_metadata.metadata,
                            g_active_call);
              break;
            case GRPC_OP_SEND_MESSAGE:
              op->op = GRPC_OP_SEND_MESSAGE;
              op->data.send_message = read_message(&inp);
              break;
            case GRPC_OP_SEND_CLOSE_FROM_CLIENT:
              op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
              break;
            case GRPC_OP_SEND_STATUS_FROM_SERVER:
              op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
              read_metadata(
                  &inp,
                  &op->data.send_status_from_server.trailing_metadata_count,
                  &op->data.send_status_from_server.trailing_metadata,
                  g_active_call);
              op->data.send_status_from_server.status = next_byte(&inp);
              op->data.send_status_from_server.status_details =
                  read_string(&inp);
              break;
            case GRPC_OP_RECV_INITIAL_METADATA:
              op->op = GRPC_OP_RECV_INITIAL_METADATA;
              op->data.recv_initial_metadata =
                  &g_active_call->recv_initial_metadata;
              break;
            case GRPC_OP_RECV_MESSAGE:
              op->op = GRPC_OP_RECV_MESSAGE;
              op->data.recv_message = &g_active_call->recv_message;
              break;
            case GRPC_OP_RECV_STATUS_ON_CLIENT:
              op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
              op->data.recv_status_on_client.status = &g_active_call->status;
              op->data.recv_status_on_client.trailing_metadata =
                  &g_active_call->recv_trailing_metadata;
              op->data.recv_status_on_client.status_details =
                  &g_active_call->recv_status_details;
              op->data.recv_status_on_client.status_details_capacity =
                  &g_active_call->recv_status_details_capacity;
              break;
            case GRPC_OP_RECV_CLOSE_ON_SERVER:
              op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
              op->data.recv_close_on_server.cancelled =
                  &g_active_call->cancelled;
              break;
          }
          op->reserved = NULL;
          op->flags = read_uint32(&inp);
        }
        if (ok) {
          validator *v = create_validator(finished_batch, g_active_call);
          g_active_call->pending_ops++;
          grpc_call_error error =
              grpc_call_start_batch(g_active_call->call, ops, num_ops, v, NULL);
          if (error != GRPC_CALL_OK) {
            v->validate(v->arg, false);
            gpr_free(v);
          }
        } else {
          end(&inp);
        }
        for (i = 0; i < num_ops; i++) {
          op = &ops[i];
          switch (op->op) {
            case GRPC_OP_SEND_INITIAL_METADATA:
              break;
            case GRPC_OP_SEND_MESSAGE:
              grpc_byte_buffer_destroy(op->data.send_message);
              break;
            case GRPC_OP_SEND_STATUS_FROM_SERVER:
              gpr_free((void *)op->data.send_status_from_server.status_details);
              break;
            case GRPC_OP_SEND_CLOSE_FROM_CLIENT:
            case GRPC_OP_RECV_INITIAL_METADATA:
            case GRPC_OP_RECV_MESSAGE:
            case GRPC_OP_RECV_STATUS_ON_CLIENT:
            case GRPC_OP_RECV_CLOSE_ON_SERVER:
              break;
          }
        }
        gpr_free(ops);

        break;
      }
      // cancel current call
      case 13: {
        if (g_active_call->type != ROOT && g_active_call->call != NULL) {
          grpc_call_cancel(g_active_call->call, NULL);
        } else {
          end(&inp);
        }
        break;
      }
      // get a calls peer
      case 14: {
        if (g_active_call->type != ROOT && g_active_call->call != NULL) {
          free_non_null(grpc_call_get_peer(g_active_call->call));
        } else {
          end(&inp);
        }
        break;
      }
      // get a channels target
      case 15: {
        if (g_channel != NULL) {
          free_non_null(grpc_channel_get_target(g_channel));
        } else {
          end(&inp);
        }
        break;
      }
      // send a ping on a channel
      case 16: {
        if (g_channel != NULL) {
          pending_pings++;
          grpc_channel_ping(g_channel, cq,
                            create_validator(decrement, &pending_pings), NULL);
        } else {
          end(&inp);
        }
        break;
      }
      // enable a tracer
      case 17: {
        char *tracer = read_string(&inp);
        grpc_tracer_set_enabled(tracer, 1);
        gpr_free(tracer);
        break;
      }
      // disable a tracer
      case 18: {
        char *tracer = read_string(&inp);
        grpc_tracer_set_enabled(tracer, 0);
        gpr_free(tracer);
        break;
      }
      // request a server call
      case 19: {
        if (g_server == NULL) {
          end(&inp);
          break;
        }
        call_state *cs = new_call(g_active_call, PENDING_SERVER);
        cs->pending_ops++;
        validator *v = create_validator(finished_request_call, cs);
        grpc_call_error error =
            grpc_server_request_call(g_server, &cs->call, &cs->call_details,
                                     &cs->recv_initial_metadata, cq, cq, v);
        if (error != GRPC_CALL_OK) {
          v->validate(v->arg, false);
          gpr_free(v);
        }
        break;
      }
      // destroy a call
      case 20: {
        if (g_active_call->type != ROOT &&
            g_active_call->type != PENDING_SERVER &&
            g_active_call->call != NULL) {
          destroy_call(g_active_call);
        } else {
          end(&inp);
        }
        break;
      }
    }
  }

  GPR_ASSERT(g_channel == NULL);
  GPR_ASSERT(g_server == NULL);
  GPR_ASSERT(g_active_call->type == ROOT);
  GPR_ASSERT(g_active_call->next == g_active_call);
  gpr_free(g_active_call);

  grpc_completion_queue_shutdown(cq);
  GPR_ASSERT(
      grpc_completion_queue_next(cq, gpr_inf_past(GPR_CLOCK_REALTIME), NULL)
          .type == GRPC_QUEUE_SHUTDOWN);
  grpc_completion_queue_destroy(cq);

  grpc_shutdown();
  return 0;
}
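The fuzzer treats its input as a stream of opcodes: each byte selects one API action, an unknown byte drains the input, and the outer loop keeps spinning after EOF until channels, servers, and calls have all wound down. A stripped-down sketch of that dispatch skeleton (the helper names echo the example, but nothing here is gRPC API):

#include <stdint.h>
#include <stddef.h>

typedef struct { const uint8_t *cur, *end; } input_stream;

static int is_eof(input_stream *inp) { return inp->cur == inp->end; }
static uint8_t next_byte(input_stream *inp) {
  return is_eof(inp) ? 0 : *inp->cur++;
}
static void end_input(input_stream *inp) { inp->cur = inp->end; }

int fuzz_one_input(const uint8_t *data, size_t size) {
  input_stream inp = {data, data + size};
  int pending = 0; /* stands in for outstanding shutdowns/watches/pings */
  while (!is_eof(&inp) || pending > 0) {
    if (is_eof(&inp) && pending > 0) pending--; /* drive toward quiescence */
    switch (next_byte(&inp)) {
      case 0: /* e.g. poll the completion queue */ break;
      case 1: /* e.g. advance fake time */ break;
      case 2: pending++; /* e.g. start an async operation */ break;
      default: end_input(&inp); break; /* bad byte: stop consuming input */
    }
  }
  return 0;
}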