Example No. 1
static void basic_do_promote(grpc_exec_ctx *exec_ctx, void *args, int success) {
  grpc_unary_promote_args *up_args = args;
  const grpc_pollset_vtable *original_vtable = up_args->original_vtable;
  grpc_pollset *pollset = up_args->pollset;
  grpc_fd *fd = up_args->fd;

  /*
   * This is quite tricky. There are a number of cases to keep in mind here:
   * 1. fd may have been orphaned
   * 2. The pollset may no longer be a unary poller (and we can't let case #1
   * leak to other pollset types!)
   * 3. pollset's fd (which may have changed) may have been orphaned
   * 4. The pollset may be shutting down.
   */

  gpr_mu_lock(&pollset->mu);
  /* First we need to ensure that nobody is polling concurrently */
  GPR_ASSERT(!grpc_pollset_has_workers(pollset));

  gpr_free(up_args);
  /* At this point the pollset may no longer be a unary poller. In that case
   * we should just call the right add function and be done. */
  /* TODO(klempner): If we're not careful this could cause infinite recursion.
   * That's not a problem for now because empty_pollset has a trivial poller
   * and we don't have any mechanism to unbecome multipoller. */
  pollset->in_flight_cbs--;
  if (pollset->shutting_down) {
    /* We don't care about this pollset anymore. */
    if (pollset->in_flight_cbs == 0 && !pollset->called_shutdown) {
      finish_shutdown(exec_ctx, pollset);
    }
  } else if (grpc_fd_is_orphaned(fd)) {
    /* Don't try to add it to anything, we'll drop our ref on it below */
  } else if (pollset->vtable != original_vtable) {
    pollset->vtable->add_fd(exec_ctx, pollset, fd, 0);
  } else if (fd != pollset->data.ptr) {
    grpc_fd *fds[2];
    fds[0] = pollset->data.ptr;
    fds[1] = fd;

    if (fds[0] && !grpc_fd_is_orphaned(fds[0])) {
      grpc_platform_become_multipoller(exec_ctx, pollset, fds,
                                       GPR_ARRAY_SIZE(fds));
      GRPC_FD_UNREF(fds[0], "basicpoll");
    } else {
      /* old fd is orphaned and we haven't cleaned it up until now, so remain a
       * unary poller */
      /* Note that it is possible that fds[1] is also orphaned at this point.
       * That's okay, we'll correct it at the next add or poll. */
      if (fds[0]) GRPC_FD_UNREF(fds[0], "basicpoll");
      pollset->data.ptr = fd;
      GRPC_FD_REF(fd, "basicpoll");
    }
  }

  gpr_mu_unlock(&pollset->mu);

  /* Matching ref in basic_pollset_add_fd */
  GRPC_FD_UNREF(fd, "basicpoll_add");
}
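A recurring invariant ties this example to the pollset code later in this list (Examples 11, 16, 18, 19 and 22): an in_flight_cbs counter guards finish_shutdown, so teardown runs only after the last deferred callback has drained, and a called_shutdown flag ensures it runs at most once. The following is a minimal, self-contained sketch of that pattern; the names are hypothetical, and plain pthreads stands in for gRPC's gpr_mu/grpc_exec_ctx machinery:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for a pollset/server being torn down. */
typedef struct {
  pthread_mutex_t mu;
  int in_flight_cbs;   /* deferred callbacks not yet run */
  int shutting_down;   /* shutdown has been requested */
  int called_shutdown; /* finish_shutdown already invoked */
} resource;

static void finish_shutdown(resource *r) {
  (void)r;
  printf("all callbacks drained; destroying resource\n");
}

/* Every deferred callback ends the way basic_do_promote does above:
   drop the in-flight count and, if shutdown was requested and we were
   the last one out, perform the deferred teardown. */
static void callback_done(resource *r) {
  int do_finish = 0;
  pthread_mutex_lock(&r->mu);
  assert(r->in_flight_cbs > 0);
  r->in_flight_cbs--;
  if (r->shutting_down && r->in_flight_cbs == 0 && !r->called_shutdown) {
    r->called_shutdown = 1;
    do_finish = 1;
  }
  pthread_mutex_unlock(&r->mu);
  /* Deliberately run outside the lock here; some of the excerpts run it
     under the lock instead, which is safe only if finish_shutdown never
     reacquires it. */
  if (do_finish) finish_shutdown(r);
}

int main(void) {
  resource r = {PTHREAD_MUTEX_INITIALIZER, 2, 1, 0};
  callback_done(&r); /* one callback still pending: no teardown yet */
  callback_done(&r); /* the last one out turns off the lights */
  return 0;
}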
Example No. 2
/* called when all listening endpoints have been shutdown, so no further
   events will be received on them - at this point it's safe to destroy
   things */
static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
  /* delete ALL the things */
  gpr_mu_lock(&s->mu);

  GPR_ASSERT(s->shutdown);

  if (s->head) {
    grpc_udp_listener *sp;
    for (sp = s->head; sp; sp = sp->next) {
      grpc_unlink_if_unix_domain_socket(&sp->addr);

      grpc_closure_init(&sp->destroyed_closure, destroyed_port, s,
                        grpc_schedule_on_exec_ctx);

      /* Call the orphan_cb to signal that the FD is about to be closed and
       * should no longer be used. */
      GPR_ASSERT(sp->orphan_cb);
      sp->orphan_cb(exec_ctx, sp->emfd, sp->server->user_data);

      grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure, NULL,
                     "udp_listener_shutdown");
    }
    gpr_mu_unlock(&s->mu);
  } else {
    gpr_mu_unlock(&s->mu);
    finish_shutdown(exec_ctx, s);
  }
}
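Note the locking discipline, which recurs in the TCP and UDP variants below: s->mu is held while the listener list is walked and asynchronous fd destruction is queued, but released before finish_shutdown runs, since finish_shutdown may invoke user callbacks or free the structure that owns the mutex. A reduced sketch of that shape, with hypothetical types and no gRPC dependencies:

#include <pthread.h>
#include <stdio.h>

typedef struct listener {
  struct listener *next;
  int fd;
} listener;

typedef struct {
  pthread_mutex_t mu;
  listener *head;
} server;

static void finish_shutdown(server *s) {
  (void)s; /* may free s or call user code: must not hold s->mu here */
  printf("server fully shut down\n");
}

static void deactivated_all_ports(server *s) {
  pthread_mutex_lock(&s->mu);
  if (s->head) {
    /* Start asynchronous destruction of each listener while locked; the
       per-listener completion callbacks finish the job later. */
    for (listener *sp = s->head; sp; sp = sp->next) {
      printf("orphaning fd %d\n", sp->fd);
    }
    pthread_mutex_unlock(&s->mu);
  } else {
    /* No listeners at all: unlock first, then tear down synchronously. */
    pthread_mutex_unlock(&s->mu);
    finish_shutdown(s);
  }
}

int main(void) {
  listener l = {NULL, 4};
  server s = {PTHREAD_MUTEX_INITIALIZER, &l};
  deactivated_all_ports(&s); /* walks the one listener, defers teardown */
  return 0;
}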
Example No. 3
/* called when all listening endpoints have been shutdown, so no further
   events will be received on them - at this point it's safe to destroy
   things */
static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
    /* delete ALL the things */
    gpr_mu_lock(&s->mu);

    if (!s->shutdown) {
        gpr_mu_unlock(&s->mu);
        return;
    }

    if (s->head) {
        grpc_tcp_listener *sp;
        for (sp = s->head; sp; sp = sp->next) {
            if (sp->addr.sockaddr.sa_family == AF_UNIX) {
                unlink_if_unix_domain_socket(&sp->addr.un);
            }
            sp->destroyed_closure.cb = destroyed_port;
            sp->destroyed_closure.cb_arg = s;
            grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure, NULL,
                           "tcp_listener_shutdown");
        }
        gpr_mu_unlock(&s->mu);
    } else {
        gpr_mu_unlock(&s->mu);
        finish_shutdown(exec_ctx, s);
    }
}
Example No. 4
/* Public function. Stops and destroys a grpc_tcp_server. */
void grpc_tcp_server_destroy(grpc_tcp_server *s,
                             void (*shutdown_complete)(void *shutdown_done_arg),
                             void *shutdown_complete_arg) {
  size_t i;
  int immediately_done = 0;
  gpr_mu_lock(&s->mu);

  s->shutdown_complete = shutdown_complete
    ? shutdown_complete
    : dont_care_about_shutdown_completion;
  s->shutdown_complete_arg = shutdown_complete_arg;

  /* First, shutdown all fd's. This will queue abortion calls for all
     of the pending accepts due to the normal operation mechanism. */
  if (s->active_ports == 0) {
    immediately_done = 1;
  }
  for (i = 0; i < s->nports; i++) {
    server_port *sp = &s->ports[i];
    sp->shutting_down = 1;
    grpc_winsocket_shutdown(sp->socket);
  }
  gpr_mu_unlock(&s->mu);

  if (immediately_done) {
    finish_shutdown(s);
  }
}
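A small detail worth copying from this version (and from Example 20): a NULL completion callback is replaced up front by a do-nothing function, so the rest of the shutdown path can invoke s->shutdown_complete unconditionally instead of NULL-checking at every call site. A sketch of that null-object trick, with hypothetical names:

#include <stdio.h>

typedef void (*shutdown_cb)(void *arg);

typedef struct {
  shutdown_cb shutdown_complete;
  void *shutdown_complete_arg;
} server;

static void dont_care_about_shutdown_completion(void *ignored) {
  (void)ignored; /* deliberate no-op */
}

static void server_destroy(server *s, shutdown_cb cb, void *arg) {
  /* Substitute a no-op so later code never has to NULL-check. */
  s->shutdown_complete = cb ? cb : dont_care_about_shutdown_completion;
  s->shutdown_complete_arg = arg;
}

static void finish_shutdown(server *s) {
  s->shutdown_complete(s->shutdown_complete_arg); /* always safe to call */
}

int main(void) {
  server s;
  server_destroy(&s, NULL, NULL); /* caller doesn't care about completion */
  finish_shutdown(&s);
  return 0;
}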
Example No. 5
/* called when all listening endpoints have been shutdown, so no further
   events will be received on them - at this point it's safe to destroy
   things */
static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
  /* delete ALL the things */
  gpr_mu_lock(&s->mu);

  GPR_ASSERT(s->shutdown);

  if (s->head) {
    grpc_udp_listener *sp;
    for (sp = s->head; sp; sp = sp->next) {
      grpc_unlink_if_unix_domain_socket(&sp->addr);

      GRPC_CLOSURE_INIT(&sp->destroyed_closure, destroyed_port, s,
                        grpc_schedule_on_exec_ctx);
      if (!sp->orphan_notified) {
        /* Call the orphan_cb to signal that the FD is about to be closed and
         * should no longer be used. Because at this point, all listening ports
         * have been shutdown already, no need to shutdown again.*/
        GRPC_CLOSURE_INIT(&sp->orphan_fd_closure, dummy_cb, sp->emfd,
                          grpc_schedule_on_exec_ctx);
        GPR_ASSERT(sp->orphan_cb);
        sp->orphan_cb(exec_ctx, sp->emfd, &sp->orphan_fd_closure,
                      sp->server->user_data);
      }
      grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure, NULL,
                     false /* already_closed */, "udp_listener_shutdown");
    }
    gpr_mu_unlock(&s->mu);
  } else {
    gpr_mu_unlock(&s->mu);
    finish_shutdown(exec_ctx, s);
  }
}
Example No. 6
/* called when all listening endpoints have been shutdown, so no further
   events will be received on them - at this point it's safe to destroy
   things */
static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
  size_t i;

  /* delete ALL the things */
  gpr_mu_lock(&s->mu);

  if (!s->shutdown) {
    gpr_mu_unlock(&s->mu);
    return;
  }

  if (s->nports) {
    for (i = 0; i < s->nports; i++) {
      server_port *sp = &s->ports[i];
      if (sp->addr.sockaddr.sa_family == AF_UNIX) {
        unlink_if_unix_domain_socket(&sp->addr.un);
      }
      sp->destroyed_closure.cb = destroyed_port;
      sp->destroyed_closure.cb_arg = s;
      grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure,
                     "udp_listener_shutdown");
    }
    gpr_mu_unlock(&s->mu);
  } else {
    gpr_mu_unlock(&s->mu);
    finish_shutdown(exec_ctx, s);
  }
}
Example No. 7
static void handle_close_callback(uv_handle_t *handle) {
  grpc_tcp_listener *sp = (grpc_tcp_listener *)handle->data;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  sp->server->open_ports--;
  if (sp->server->open_ports == 0) {
    finish_shutdown(&exec_ctx, sp->server);
  }
  grpc_exec_ctx_finish(&exec_ctx);
}
Example No. 8
static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server, int success) {
  grpc_udp_server *s = server;
  gpr_mu_lock(&s->mu);
  s->destroyed_ports++;
  if (s->destroyed_ports == s->nports) {
    gpr_mu_unlock(&s->mu);
    finish_shutdown(exec_ctx, s);
  } else {
    gpr_mu_unlock(&s->mu);
  }
}
Example No. 9
static void destroyed_port(void *server, int success) {
  grpc_tcp_server *s = server;
  gpr_mu_lock(&s->mu);
  s->destroyed_ports++;
  if (s->destroyed_ports == s->nports) {
    gpr_mu_unlock(&s->mu);
    finish_shutdown(s);
  } else {
    gpr_mu_unlock(&s->mu);
  }
}
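Examples 8 and 9 are the completion side of the teardown walks above: each per-port destruction callback bumps destroyed_ports, and whichever callback brings the count up to nports performs the final teardown. In effect this is a countdown latch. A minimal sketch, again with hypothetical names:

#include <pthread.h>
#include <stdio.h>

typedef struct {
  pthread_mutex_t mu;
  size_t destroyed_ports;
  size_t nports;
} server;

static void finish_shutdown(server *s) {
  (void)s;
  printf("last port destroyed; freeing server\n");
}

/* Runs once per port, possibly from different threads. Exactly one
   invocation (the one that completes the count) calls finish_shutdown,
   and it does so after releasing the lock. */
static void destroyed_port(server *s) {
  pthread_mutex_lock(&s->mu);
  s->destroyed_ports++;
  if (s->destroyed_ports == s->nports) {
    pthread_mutex_unlock(&s->mu);
    finish_shutdown(s);
  } else {
    pthread_mutex_unlock(&s->mu);
  }
}

int main(void) {
  server s = {PTHREAD_MUTEX_INITIALIZER, 0, 3};
  destroyed_port(&s);
  destroyed_port(&s);
  destroyed_port(&s); /* third call fires finish_shutdown */
  return 0;
}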
Example No. 10
static void do_shutdown(struct cardstate *cs)
{
	gigaset_block_channels(cs);

	if (cs->mstate == MS_READY) {
		cs->mstate = MS_SHUTDOWN;
		cs->at_state.pending_commands |= PC_SHUTDOWN;
		gig_dbg(DEBUG_EVENT, "Scheduling PC_SHUTDOWN");
		cs->commands_pending = 1;
	} else
		finish_shutdown(cs);
}
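The Gigaset ISDN driver expresses the same deferral at the level of its AT-command state machine: if the device is live, shutdown is queued as a pending command and finish_shutdown runs later from the event layer (the ACT_SDOWN case of do_action() in Example 21 below); otherwise it runs immediately. Schematically, with invented minimal types rather than the driver's real cardstate:

#include <stdio.h>

enum mstate { MS_READY, MS_SHUTDOWN };
#define PC_SHUTDOWN 0x01u

struct cardstate {
  enum mstate mstate;
  unsigned pending_commands;
};

static void finish_shutdown(struct cardstate *cs) {
  (void)cs;
  printf("device shut down\n");
}

static void do_shutdown(struct cardstate *cs) {
  if (cs->mstate == MS_READY) {
    /* Device is live: queue the command; the event loop finishes later. */
    cs->mstate = MS_SHUTDOWN;
    cs->pending_commands |= PC_SHUTDOWN;
  } else {
    finish_shutdown(cs); /* nothing in flight: tear down now */
  }
}

/* Event-loop side: runs when the queued command sequence completes,
   mirroring the ACT_SDOWN case of do_action() in Example 21. */
static void on_command_done(struct cardstate *cs) {
  if (cs->pending_commands & PC_SHUTDOWN) {
    cs->pending_commands &= ~PC_SHUTDOWN;
    finish_shutdown(cs);
  }
}

int main(void) {
  struct cardstate cs = {MS_READY, 0};
  do_shutdown(&cs);     /* queued, not finished yet */
  on_command_done(&cs); /* completion path performs the shutdown */
  return 0;
}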
Example No. 11
void grpc_pollset_work(grpc_pollset *pollset, grpc_pollset_worker *worker,
                       gpr_timespec now, gpr_timespec deadline) {
  /* pollset->mu already held */
  int added_worker = 0;
  /* this must happen before we (potentially) drop pollset->mu */
  worker->next = worker->prev = NULL;
  /* TODO(ctiller): pool these */
  grpc_wakeup_fd_init(&worker->wakeup_fd);
  if (grpc_maybe_call_delayed_callbacks(&pollset->mu, 1)) {
    goto done;
  }
  if (grpc_alarm_check(&pollset->mu, now, &deadline)) {
    goto done;
  }
  if (pollset->shutting_down) {
    goto done;
  }
  if (pollset->in_flight_cbs) {
    /* Give do_promote priority so we don't starve it out */
    gpr_mu_unlock(&pollset->mu);
    gpr_mu_lock(&pollset->mu);
    goto done;
  }
  if (!pollset->kicked_without_pollers) {
    push_front_worker(pollset, worker);
    added_worker = 1;
    gpr_tls_set(&g_current_thread_poller, (gpr_intptr)pollset);
    pollset->vtable->maybe_work(pollset, worker, deadline, now, 1);
    gpr_tls_set(&g_current_thread_poller, 0);
  } else {
    pollset->kicked_without_pollers = 0;
  }
done:
  grpc_wakeup_fd_destroy(&worker->wakeup_fd);
  if (added_worker) {
    remove_worker(pollset, worker);
  }
  if (pollset->shutting_down) {
    if (grpc_pollset_has_workers(pollset)) {
      grpc_pollset_kick(pollset, NULL);
    } else if (!pollset->called_shutdown && pollset->in_flight_cbs == 0) {
      pollset->called_shutdown = 1;
      gpr_mu_unlock(&pollset->mu);
      finish_shutdown(pollset);
      /* Continuing to access pollset here is safe -- it is the caller's
       * responsibility to not destroy when it has outstanding calls to
       * grpc_pollset_work.
       * TODO(dklempner): Can we refactor the shutdown logic to avoid this? */
      gpr_mu_lock(&pollset->mu);
    }
  }
}
Example No. 12
static void decrement_active_ports_and_notify(server_port *sp) {
  int notify = 0;
  sp->shutting_down = 0;
  gpr_mu_lock(&sp->server->mu);
  GPR_ASSERT(sp->server->active_ports > 0);
  if (0 == --sp->server->active_ports && sp->server->shutdown_complete != NULL) {
    notify = 1;
  }
  gpr_mu_unlock(&sp->server->mu);
  if (notify) {
    finish_shutdown(sp->server);
  }
}
Example No. 13
static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server,
                           bool success) {
    grpc_tcp_server *s = server;
    gpr_mu_lock(&s->mu);
    s->destroyed_ports++;
    if (s->destroyed_ports == s->nports) {
        gpr_mu_unlock(&s->mu);
        finish_shutdown(exec_ctx, s);
    } else {
        GPR_ASSERT(s->destroyed_ports < s->nports);
        gpr_mu_unlock(&s->mu);
    }
}
Example No. 14
static void decrement_active_ports_and_notify(grpc_exec_ctx *exec_ctx,
                                              grpc_tcp_listener *sp) {
  int notify = 0;
  sp->shutting_down = 0;
  gpr_mu_lock(&sp->server->mu);
  GPR_ASSERT(sp->server->active_ports > 0);
  if (0 == --sp->server->active_ports) {
    notify = 1;
  }
  gpr_mu_unlock(&sp->server->mu);
  if (notify) {
    finish_shutdown(exec_ctx, sp->server);
  }
}
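Examples 12 and 14 count in the opposite direction (active ports down to zero) and show a detail shared with Example 18: the decision to notify is computed under the lock and stashed in a local, while the notification itself happens after unlocking. A sketch of that decide-locked, act-unlocked shape, with hypothetical names:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

typedef struct {
  pthread_mutex_t mu;
  int active_ports;
} server;

static void finish_shutdown(server *s) {
  (void)s;
  printf("no active ports remain\n");
}

static void decrement_active_ports_and_notify(server *s) {
  int notify = 0;
  pthread_mutex_lock(&s->mu);
  assert(s->active_ports > 0);
  if (--s->active_ports == 0) notify = 1; /* decide while locked */
  pthread_mutex_unlock(&s->mu);
  if (notify) finish_shutdown(s); /* act after unlocking */
}

int main(void) {
  server s = {PTHREAD_MUTEX_INITIALIZER, 2};
  decrement_active_ports_and_notify(&s);
  decrement_active_ports_and_notify(&s); /* hits zero: notifies */
  return 0;
}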
Example No. 15
static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
  int immediately_done = 0;
  grpc_tcp_listener *sp;

  if (s->open_ports == 0) {
    immediately_done = 1;
  }
  for (sp = s->head; sp; sp = sp->next) {
    uv_close((uv_handle_t *)sp->handle, handle_close_callback);
  }

  if (immediately_done) {
    finish_shutdown(exec_ctx, s);
  }
}
Example No. 16
void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                           grpc_closure *closure) {
    GPR_ASSERT(!pollset->shutting_down);
    pollset->shutting_down = 1;
    pollset->shutdown_done = closure;
    grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
    if (!grpc_pollset_has_workers(pollset)) {
        grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
    }
    if (!pollset->called_shutdown && pollset->in_flight_cbs == 0 &&
            !grpc_pollset_has_workers(pollset)) {
        pollset->called_shutdown = 1;
        finish_shutdown(exec_ctx, pollset);
    }
}
Example No. 17
static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
  int immediately_done = 0;
  grpc_tcp_listener *sp;
  gpr_mu_lock(&s->mu);

  /* First, shutdown all fd's. This will queue abortion calls for all
     of the pending accepts due to the normal operation mechanism. */
  if (s->active_ports == 0) {
    immediately_done = 1;
  }
  for (sp = s->head; sp; sp = sp->next) {
    sp->shutting_down = 1;
    grpc_winsocket_shutdown(sp->socket);
  }
  gpr_mu_unlock(&s->mu);

  if (immediately_done) {
    finish_shutdown(exec_ctx, s);
  }
}
Example No. 18
void grpc_pollset_shutdown(grpc_pollset *pollset,
                           void (*shutdown_done)(void *arg),
                           void *shutdown_done_arg) {
  int call_shutdown = 0;
  gpr_mu_lock(&pollset->mu);
  GPR_ASSERT(!pollset->shutting_down);
  pollset->shutting_down = 1;
  if (!pollset->called_shutdown && pollset->in_flight_cbs == 0 &&
      !grpc_pollset_has_workers(pollset)) {
    pollset->called_shutdown = 1;
    call_shutdown = 1;
  }
  pollset->shutdown_done_cb = shutdown_done;
  pollset->shutdown_done_arg = shutdown_done_arg;
  grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
  gpr_mu_unlock(&pollset->mu);

  if (call_shutdown) {
    finish_shutdown(pollset);
  }
}
Example No. 19
void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                           grpc_closure *closure) {
  int call_shutdown = 0;
  gpr_mu_lock(&pollset->mu);
  GPR_ASSERT(!pollset->shutting_down);
  pollset->shutting_down = 1;
  if (!pollset->called_shutdown && pollset->in_flight_cbs == 0 &&
      !grpc_pollset_has_workers(pollset)) {
    pollset->called_shutdown = 1;
    call_shutdown = 1;
  }
  if (!grpc_pollset_has_workers(pollset)) {
    grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs);
  }
  pollset->shutdown_done = closure;
  grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
  gpr_mu_unlock(&pollset->mu);

  if (call_shutdown) {
    finish_shutdown(exec_ctx, pollset);
  }
}
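Examples 16, 18 and 19 pair with the worker loops of Examples 11 and 22: shutdown may be requested while workers are still polling, so the shutdown request and the last worker's exit path race to the same called_shutdown guard, and whichever gets there first with no workers and nothing in flight performs the teardown exactly once. A condensed sketch of the two entry points, using hypothetical names and a plain worker count:

#include <pthread.h>
#include <stdio.h>

typedef struct {
  pthread_mutex_t mu;
  int workers;         /* threads currently inside pollset_work */
  int shutting_down;
  int called_shutdown; /* ensures finish_shutdown runs exactly once */
} pollset;

static void finish_shutdown(pollset *p) {
  (void)p;
  printf("pollset destroyed\n");
}

/* Entry point 1: the shutdown request itself. */
static void pollset_shutdown(pollset *p) {
  int call_shutdown = 0;
  pthread_mutex_lock(&p->mu);
  p->shutting_down = 1;
  if (!p->called_shutdown && p->workers == 0) {
    p->called_shutdown = 1;
    call_shutdown = 1;
  }
  pthread_mutex_unlock(&p->mu);
  if (call_shutdown) finish_shutdown(p);
}

/* Entry point 2: a worker leaving the poll loop. */
static void worker_exit(pollset *p) {
  int call_shutdown = 0;
  pthread_mutex_lock(&p->mu);
  p->workers--;
  if (p->shutting_down && !p->called_shutdown && p->workers == 0) {
    p->called_shutdown = 1;
    call_shutdown = 1;
  }
  pthread_mutex_unlock(&p->mu);
  if (call_shutdown) finish_shutdown(p);
}

int main(void) {
  pollset p = {PTHREAD_MUTEX_INITIALIZER, 1, 0, 0};
  pollset_shutdown(&p); /* a worker is still polling: defer */
  worker_exit(&p);      /* last worker performs the deferred shutdown */
  return 0;
}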
Example No. 20
void grpc_tcp_server_destroy(
    grpc_tcp_server *s, void (*shutdown_complete)(void *shutdown_complete_arg),
    void *shutdown_complete_arg) {
  size_t i;
  gpr_mu_lock(&s->mu);

  s->shutdown_complete = shutdown_complete
                             ? shutdown_complete
                             : dont_care_about_shutdown_completion;
  s->shutdown_complete_arg = shutdown_complete_arg;

  /* shutdown all fd's */
  for (i = 0; i < s->nports; i++) {
    grpc_fd_shutdown(s->ports[i].emfd);
  }
  /* wait while that happens */
  /* TODO(ctiller): make this asynchronous also */
  while (s->active_ports) {
    gpr_cv_wait(&s->cv, &s->mu, gpr_inf_future);
  }

  /* delete ALL the things */
  if (s->nports) {
    for (i = 0; i < s->nports; i++) {
      server_port *sp = &s->ports[i];
      if (sp->addr.sockaddr.sa_family == AF_UNIX) {
        unlink_if_unix_domain_socket(&sp->addr.un);
      }
      grpc_fd_orphan(sp->emfd, destroyed_port, s);
    }
    gpr_mu_unlock(&s->mu);
  } else {
    gpr_mu_unlock(&s->mu);
    finish_shutdown(s);
  }
}
Example No. 21
static void do_action(int action, struct cardstate *cs,
		      struct bc_state *bcs,
		      struct at_state_t **p_at_state, char **pp_command,
		      int *p_genresp, int *p_resp_code,
		      struct event_t *ev)
{
	struct at_state_t *at_state = *p_at_state;
	struct bc_state *bcs2;
	unsigned long flags;

	int channel;

	unsigned char *s, *e;
	int i;
	unsigned long val;

	switch (action) {
	case ACT_NOTHING:
		break;
	case ACT_TIMEOUT:
		at_state->waiting = 1;
		break;
	case ACT_INIT:
		cs->at_state.pending_commands &= ~PC_INIT;
		cs->cur_at_seq = SEQ_NONE;
		cs->mode = M_UNIMODEM;
		spin_lock_irqsave(&cs->lock, flags);
		if (!cs->cidmode) {
			spin_unlock_irqrestore(&cs->lock, flags);
			gigaset_free_channels(cs);
			cs->mstate = MS_READY;
			break;
		}
		spin_unlock_irqrestore(&cs->lock, flags);
		cs->at_state.pending_commands |= PC_CIDMODE;
		gig_dbg(DEBUG_EVENT, "Scheduling PC_CIDMODE");
		cs->commands_pending = 1;
		break;
	case ACT_FAILINIT:
		dev_warn(cs->dev, "Could not initialize the device.\n");
		cs->dle = 0;
		init_failed(cs, M_UNKNOWN);
		cs->cur_at_seq = SEQ_NONE;
		break;
	case ACT_CONFIGMODE:
		init_failed(cs, M_CONFIG);
		cs->cur_at_seq = SEQ_NONE;
		break;
	case ACT_SETDLE1:
		cs->dle = 1;
		/* cs->inbuf[0].inputstate |= INS_command | INS_DLE_command; */
		cs->inbuf[0].inputstate &=
			~(INS_command | INS_DLE_command);
		break;
	case ACT_SETDLE0:
		cs->dle = 0;
		cs->inbuf[0].inputstate =
			(cs->inbuf[0].inputstate & ~INS_DLE_command)
			| INS_command;
		break;
	case ACT_CMODESET:
		if (cs->mstate == MS_INIT || cs->mstate == MS_RECOVER) {
			gigaset_free_channels(cs);
			cs->mstate = MS_READY;
		}
		cs->mode = M_CID;
		cs->cur_at_seq = SEQ_NONE;
		break;
	case ACT_UMODESET:
		cs->mode = M_UNIMODEM;
		cs->cur_at_seq = SEQ_NONE;
		break;
	case ACT_FAILCMODE:
		cs->cur_at_seq = SEQ_NONE;
		if (cs->mstate == MS_INIT || cs->mstate == MS_RECOVER) {
			init_failed(cs, M_UNKNOWN);
			break;
		}
		if (reinit_and_retry(cs, -1) < 0)
			schedule_init(cs, MS_RECOVER);
		break;
	case ACT_FAILUMODE:
		cs->cur_at_seq = SEQ_NONE;
		schedule_init(cs, MS_RECOVER);
		break;
	case ACT_HUPMODEM:
		/* send "+++" (hangup in unimodem mode) */
		if (cs->connected) {
			struct cmdbuf_t *cb;

			cb = kmalloc(sizeof(struct cmdbuf_t) + 3, GFP_ATOMIC);
			if (!cb) {
				dev_err(cs->dev, "%s: out of memory\n",
					__func__);
				return;
			}
			memcpy(cb->buf, "+++", 3);
			cb->len = 3;
			cb->offset = 0;
			cb->next = NULL;
			cb->wake_tasklet = NULL;
			cs->ops->write_cmd(cs, cb);
		}
		break;
	case ACT_RING:
		/* get fresh AT state structure for new CID */
		at_state = get_free_channel(cs, ev->parameter);
		if (!at_state) {
			dev_warn(cs->dev,
				 "RING ignored: could not allocate channel structure\n");
			break;
		}

		/* initialize AT state structure
		 * note that bcs may be NULL if no B channel is free
		 */
		at_state->ConState = 700;
		for (i = 0; i < STR_NUM; ++i) {
			kfree(at_state->str_var[i]);
			at_state->str_var[i] = NULL;
		}
		at_state->int_var[VAR_ZCTP] = -1;

		spin_lock_irqsave(&cs->lock, flags);
		at_state->timer_expires = RING_TIMEOUT;
		at_state->timer_active = 1;
		spin_unlock_irqrestore(&cs->lock, flags);
		break;
	case ACT_ICALL:
		handle_icall(cs, bcs, at_state);
		break;
	case ACT_FAILSDOWN:
		dev_warn(cs->dev, "Could not shut down the device.\n");
		/* fall through */
	case ACT_FAKESDOWN:
	case ACT_SDOWN:
		cs->cur_at_seq = SEQ_NONE;
		finish_shutdown(cs);
		break;
	case ACT_CONNECT:
		if (cs->onechannel) {
			at_state->pending_commands |= PC_DLE1;
			cs->commands_pending = 1;
			break;
		}
		bcs->chstate |= CHS_D_UP;
		gigaset_isdn_connD(bcs);
		cs->ops->init_bchannel(bcs);
		break;
	case ACT_DLE1:
		cs->cur_at_seq = SEQ_NONE;
		bcs = cs->bcs + cs->curchannel;

		bcs->chstate |= CHS_D_UP;
		gigaset_isdn_connD(bcs);
		cs->ops->init_bchannel(bcs);
		break;
	case ACT_FAKEHUP:
		at_state->int_var[VAR_ZSAU] = ZSAU_NULL;
		/* fall through */
	case ACT_DISCONNECT:
		cs->cur_at_seq = SEQ_NONE;
		at_state->cid = -1;
		if (!bcs) {
			disconnect_nobc(p_at_state, cs);
		} else if (cs->onechannel && cs->dle) {
			/* Check for other open channels not needed:
			 * DLE only used for M10x with one B channel.
			 */
			at_state->pending_commands |= PC_DLE0;
			cs->commands_pending = 1;
		} else {
			disconnect_bc(at_state, cs, bcs);
		}
		break;
	case ACT_FAKEDLE0:
		at_state->int_var[VAR_ZDLE] = 0;
		cs->dle = 0;
		/* fall through */
	case ACT_DLE0:
		cs->cur_at_seq = SEQ_NONE;
		bcs2 = cs->bcs + cs->curchannel;
		disconnect_bc(&bcs2->at_state, cs, bcs2);
		break;
	case ACT_ABORTHUP:
		cs->cur_at_seq = SEQ_NONE;
		dev_warn(cs->dev, "Could not hang up.\n");
		at_state->cid = -1;
		if (!bcs)
			disconnect_nobc(p_at_state, cs);
		else if (cs->onechannel)
			at_state->pending_commands |= PC_DLE0;
		else
			disconnect_bc(at_state, cs, bcs);
		schedule_init(cs, MS_RECOVER);
		break;
	case ACT_FAILDLE0:
		cs->cur_at_seq = SEQ_NONE;
		dev_warn(cs->dev, "Error leaving DLE mode.\n");
		cs->dle = 0;
		bcs2 = cs->bcs + cs->curchannel;
		disconnect_bc(&bcs2->at_state, cs, bcs2);
		schedule_init(cs, MS_RECOVER);
		break;
	case ACT_FAILDLE1:
		cs->cur_at_seq = SEQ_NONE;
		dev_warn(cs->dev,
			 "Could not enter DLE mode. Trying to hang up.\n");
		channel = cs->curchannel;
		cs->bcs[channel].at_state.pending_commands |= PC_HUP;
		cs->commands_pending = 1;
		break;

	case ACT_CID: /* got cid; start dialing */
		cs->cur_at_seq = SEQ_NONE;
		channel = cs->curchannel;
		if (ev->parameter > 0 && ev->parameter <= 65535) {
			cs->bcs[channel].at_state.cid = ev->parameter;
			cs->bcs[channel].at_state.pending_commands |=
				PC_DIAL;
			cs->commands_pending = 1;
			break;
		}
		/* bad cid: fall through */
	case ACT_FAILCID:
		cs->cur_at_seq = SEQ_NONE;
		channel = cs->curchannel;
		if (reinit_and_retry(cs, channel) < 0) {
			dev_warn(cs->dev,
				 "Could not get a call ID. Cannot dial.\n");
			bcs2 = cs->bcs + channel;
			disconnect_bc(&bcs2->at_state, cs, bcs2);
		}
		break;
	case ACT_ABORTCID:
		cs->cur_at_seq = SEQ_NONE;
		bcs2 = cs->bcs + cs->curchannel;
		disconnect_bc(&bcs2->at_state, cs, bcs2);
		break;

	case ACT_DIALING:
	case ACT_ACCEPTED:
		cs->cur_at_seq = SEQ_NONE;
		break;

	case ACT_ABORTACCEPT:	/* hangup/error/timeout during ICALL processing */
		if (bcs)
			disconnect_bc(at_state, cs, bcs);
		else
			disconnect_nobc(p_at_state, cs);
		break;

	case ACT_ABORTDIAL:	/* error/timeout during dial preparation */
		cs->cur_at_seq = SEQ_NONE;
		at_state->pending_commands |= PC_HUP;
		cs->commands_pending = 1;
		break;

	case ACT_REMOTEREJECT:	/* DISCONNECT_IND after dialling */
	case ACT_CONNTIMEOUT:	/* timeout waiting for ZSAU=ACTIVE */
	case ACT_REMOTEHUP:	/* DISCONNECT_IND with established connection */
		at_state->pending_commands |= PC_HUP;
		cs->commands_pending = 1;
		break;
	case ACT_GETSTRING: /* warning: RING, ZDLE, ...
			       are not handled properly anymore */
		at_state->getstring = 1;
		break;
	case ACT_SETVER:
		if (!ev->ptr) {
			*p_genresp = 1;
			*p_resp_code = RSP_ERROR;
			break;
		}
		s = ev->ptr;

		if (!strcmp(s, "OK")) {
			/* OK without version string: assume old response */
			*p_genresp = 1;
			*p_resp_code = RSP_NONE;
			break;
		}

		for (i = 0; i < 4; ++i) {
			val = simple_strtoul(s, (char **) &e, 10);
			if (val > INT_MAX || e == s)
				break;
			if (i == 3) {
				if (*e)
					break;
			} else if (*e != '.')
				break;
			else
				s = e + 1;
			cs->fwver[i] = val;
		}
		if (i != 4) {
			*p_genresp = 1;
			*p_resp_code = RSP_ERROR;
			break;
		}
		cs->gotfwver = 0;
		break;
	case ACT_GOTVER:
		if (cs->gotfwver == 0) {
			cs->gotfwver = 1;
			gig_dbg(DEBUG_EVENT,
				"firmware version %02d.%03d.%02d.%02d",
				cs->fwver[0], cs->fwver[1],
				cs->fwver[2], cs->fwver[3]);
			break;
		}
		/* fall through */
	case ACT_FAILVER:
		cs->gotfwver = -1;
		dev_err(cs->dev, "could not read firmware version.\n");
		break;
	case ACT_ERROR:
		gig_dbg(DEBUG_ANY, "%s: ERROR response in ConState %d",
			__func__, at_state->ConState);
		cs->cur_at_seq = SEQ_NONE;
		break;
	case ACT_DEBUG:
		gig_dbg(DEBUG_ANY, "%s: resp_code %d in ConState %d",
			__func__, ev->type, at_state->ConState);
		break;
	case ACT_WARN:
		dev_warn(cs->dev, "%s: resp_code %d in ConState %d!\n",
			 __func__, ev->type, at_state->ConState);
		break;
	case ACT_ZCAU:
		dev_warn(cs->dev, "cause code %04x in connection state %d.\n",
			 ev->parameter, at_state->ConState);
		break;

	/* events from the LL */

	case ACT_DIAL:
		if (!ev->ptr) {
			*p_genresp = 1;
			*p_resp_code = RSP_ERROR;
			break;
		}
		start_dial(at_state, ev->ptr, ev->parameter);
		break;
	case ACT_ACCEPT:
		start_accept(at_state);
		break;
	case ACT_HUP:
		at_state->pending_commands |= PC_HUP;
		gig_dbg(DEBUG_EVENT, "Scheduling PC_HUP");
		cs->commands_pending = 1;
		break;

	/* hotplug events */

	case ACT_STOP:
		do_stop(cs);
		break;
	case ACT_START:
		do_start(cs);
		break;

	/* events from the interface */

	case ACT_IF_LOCK:
		cs->cmd_result = ev->parameter ? do_lock(cs) : do_unlock(cs);
		cs->waiting = 0;
		wake_up(&cs->waitqueue);
		break;
	case ACT_IF_VER:
		if (ev->parameter != 0)
			cs->cmd_result = -EINVAL;
		else if (cs->gotfwver != 1) {
			cs->cmd_result = -ENOENT;
		} else {
			memcpy(ev->arg, cs->fwver, sizeof cs->fwver);
			cs->cmd_result = 0;
		}
		cs->waiting = 0;
		wake_up(&cs->waitqueue);
		break;

	/* events from the proc file system */

	case ACT_PROC_CIDMODE:
		spin_lock_irqsave(&cs->lock, flags);
		if (ev->parameter != cs->cidmode) {
			cs->cidmode = ev->parameter;
			if (ev->parameter) {
				cs->at_state.pending_commands |= PC_CIDMODE;
				gig_dbg(DEBUG_EVENT, "Scheduling PC_CIDMODE");
			} else {
				cs->at_state.pending_commands |= PC_UMMODE;
				gig_dbg(DEBUG_EVENT, "Scheduling PC_UMMODE");
			}
			cs->commands_pending = 1;
		}
		spin_unlock_irqrestore(&cs->lock, flags);
		cs->waiting = 0;
		wake_up(&cs->waitqueue);
		break;

	/* events from the hardware drivers */

	case ACT_NOTIFY_BC_DOWN:
		bchannel_down(bcs);
		break;
	case ACT_NOTIFY_BC_UP:
		bchannel_up(bcs);
		break;
	case ACT_SHUTDOWN:
		do_shutdown(cs);
		break;


	default:
		if (action >= ACT_CMD && action < ACT_CMD + AT_NUM) {
			*pp_command = at_state->bcs->commands[action - ACT_CMD];
			if (!*pp_command) {
				*p_genresp = 1;
				*p_resp_code = RSP_NULL;
			}
		} else
			dev_err(cs->dev, "%s: action==%d!\n", __func__, action);
	}
}
Example No. 22
void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                       grpc_pollset_worker **worker_hdl, gpr_timespec now,
                       gpr_timespec deadline) {
    grpc_pollset_worker worker;
    *worker_hdl = &worker;

    /* pollset->mu already held */
    int added_worker = 0;
    int locked = 1;
    int queued_work = 0;
    int keep_polling = 0;
    GPR_TIMER_BEGIN("grpc_pollset_work", 0);
    /* this must happen before we (potentially) drop pollset->mu */
    worker.next = worker.prev = NULL;
    worker.reevaluate_polling_on_wakeup = 0;
    if (pollset->local_wakeup_cache != NULL) {
        worker.wakeup_fd = pollset->local_wakeup_cache;
        pollset->local_wakeup_cache = worker.wakeup_fd->next;
    } else {
        worker.wakeup_fd = gpr_malloc(sizeof(*worker.wakeup_fd));
        grpc_wakeup_fd_init(&worker.wakeup_fd->fd);
    }
    worker.kicked_specifically = 0;
    /* If there's work waiting for the pollset to be idle, and the
       pollset is idle, then do that work */
    if (!grpc_pollset_has_workers(pollset) &&
            !grpc_closure_list_empty(pollset->idle_jobs)) {
        GPR_TIMER_MARK("grpc_pollset_work.idle_jobs", 0);
        grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
        goto done;
    }
    /* If we're shutting down then we don't execute any extended work */
    if (pollset->shutting_down) {
        GPR_TIMER_MARK("grpc_pollset_work.shutting_down", 0);
        goto done;
    }
    /* Give do_promote priority so we don't starve it out */
    if (pollset->in_flight_cbs) {
        GPR_TIMER_MARK("grpc_pollset_work.in_flight_cbs", 0);
        gpr_mu_unlock(&pollset->mu);
        locked = 0;
        goto done;
    }
    /* Start polling, and keep doing so while we're being asked to
       re-evaluate our pollers (this allows poll() based pollers to
       ensure they don't miss wakeups) */
    keep_polling = 1;
    while (keep_polling) {
        keep_polling = 0;
        if (!pollset->kicked_without_pollers) {
            if (!added_worker) {
                push_front_worker(pollset, &worker);
                added_worker = 1;
                gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
            }
            gpr_tls_set(&g_current_thread_poller, (intptr_t)pollset);
            GPR_TIMER_BEGIN("maybe_work_and_unlock", 0);
            pollset->vtable->maybe_work_and_unlock(exec_ctx, pollset, &worker,
                                                   deadline, now);
            GPR_TIMER_END("maybe_work_and_unlock", 0);
            locked = 0;
            gpr_tls_set(&g_current_thread_poller, 0);
        } else {
            GPR_TIMER_MARK("grpc_pollset_work.kicked_without_pollers", 0);
            pollset->kicked_without_pollers = 0;
        }
        /* Finished execution - start cleaning up.
           Note that we may arrive here from outside the enclosing while() loop.
           In that case we won't loop, though, as we haven't added the worker
           to the worker list, which means nobody could ask us to re-evaluate
           polling. */
done:
        if (!locked) {
            queued_work |= grpc_exec_ctx_flush(exec_ctx);
            gpr_mu_lock(&pollset->mu);
            locked = 1;
        }
        /* If we're forced to re-evaluate polling (via grpc_pollset_kick with
           GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) then we land here and force
           a loop */
        if (worker.reevaluate_polling_on_wakeup) {
            worker.reevaluate_polling_on_wakeup = 0;
            pollset->kicked_without_pollers = 0;
            if (queued_work || worker.kicked_specifically) {
                /* If there's queued work on the list, then set the deadline to be
                   immediate so we get back out of the polling loop quickly */
                deadline = gpr_inf_past(GPR_CLOCK_MONOTONIC);
            }
            keep_polling = 1;
        }
    }
    if (added_worker) {
        remove_worker(pollset, &worker);
        gpr_tls_set(&g_current_thread_worker, 0);
    }
    /* release wakeup fd to the local pool */
    worker.wakeup_fd->next = pollset->local_wakeup_cache;
    pollset->local_wakeup_cache = worker.wakeup_fd;
    /* check shutdown conditions */
    if (pollset->shutting_down) {
        if (grpc_pollset_has_workers(pollset)) {
            grpc_pollset_kick(pollset, NULL);
        } else if (!pollset->called_shutdown && pollset->in_flight_cbs == 0) {
            pollset->called_shutdown = 1;
            gpr_mu_unlock(&pollset->mu);
            finish_shutdown(exec_ctx, pollset);
            grpc_exec_ctx_flush(exec_ctx);
            /* Continuing to access pollset here is safe -- it is the caller's
             * responsibility to not destroy when it has outstanding calls to
             * grpc_pollset_work.
             * TODO(dklempner): Can we refactor the shutdown logic to avoid this? */
            gpr_mu_lock(&pollset->mu);
        } else if (!grpc_closure_list_empty(pollset->idle_jobs)) {
            grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
            gpr_mu_unlock(&pollset->mu);
            grpc_exec_ctx_flush(exec_ctx);
            gpr_mu_lock(&pollset->mu);
        }
    }
    *worker_hdl = NULL;
    GPR_TIMER_END("grpc_pollset_work", 0);
}