Example 1
0
/* pollset_shutdown is guaranteed to be called before pollset_destroy. */
static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
  pollable_destroy(&pollset->pollable);
  if (pollset_is_pollable_fd(pollset, pollset->current_pollable)) {
    UNREF_BY(exec_ctx, (grpc_fd *)pollset->current_pollable, 2,
             "pollset_pollable");
  }
  GRPC_LOG_IF_ERROR("pollset_process_events",
                    pollset_process_events(exec_ctx, pollset, true));
}
Example 2
0
void grpc_fd_orphan(grpc_fd *fd, grpc_iomgr_closure *on_done,
                    const char *reason) {
  fd->on_done_closure = on_done;
  shutdown(fd->fd, SHUT_RDWR);
  REF_BY(fd, 1, reason); /* remove active status, but keep referenced */
  gpr_mu_lock(&fd->watcher_mu);
  wake_all_watchers_locked(fd);
  gpr_mu_unlock(&fd->watcher_mu);
  UNREF_BY(fd, 2, reason); /* drop the reference */
}
Example 3
0
void grpc_fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done,
                    const char *reason) {
  fd->on_done_closure = on_done;
  shutdown(fd->fd, SHUT_RDWR);
  gpr_mu_lock(&fd->mu);
  REF_BY(fd, 1, reason); /* remove active status, but keep referenced */
  if (!has_watchers(fd)) {
    fd->closed = 1;
    close(fd->fd);
    grpc_exec_ctx_enqueue(exec_ctx, fd->on_done_closure, 1);
  } else {
    wake_all_watchers_locked(fd);
  }
  gpr_mu_unlock(&fd->mu);
  UNREF_BY(fd, 2, reason); /* drop the reference */
}
Example 4
0
static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                       grpc_pollset_worker *worker,
                       grpc_pollset_worker **worker_hdl) {
  if (NEW_ROOT ==
      worker_remove(&worker->pollable->root_worker, PWL_POLLABLE, worker)) {
    gpr_cv_signal(&worker->pollable->root_worker->cv);
  }
  if (worker->initialized_cv) {
    gpr_cv_destroy(&worker->cv);
  }
  if (pollset_is_pollable_fd(pollset, worker->pollable)) {
    UNREF_BY(exec_ctx, (grpc_fd *)worker->pollable, 2, "one_poll");
  }
  if (EMPTIED == worker_remove(&pollset->root_worker, PWL_POLLSET, worker)) {
    pollset_maybe_finish_shutdown(exec_ctx, pollset);
  }
}
Example 5
0
void grpc_fd_orphan(grpc_fd *fd, grpc_iomgr_closure *on_done,
                    const char *reason) {
  fd->on_done_closure = on_done;
  shutdown(fd->fd, SHUT_RDWR);
  gpr_mu_lock(&fd->watcher_mu);
  REF_BY(fd, 1, reason); /* remove active status, but keep referenced */
  if (!has_watchers(fd)) {
    fd->closed = 1;
    close(fd->fd);
    if (fd->on_done_closure) {
      grpc_iomgr_add_callback(fd->on_done_closure);
    }
  } else {
    wake_all_watchers_locked(fd);
  }
  gpr_mu_unlock(&fd->watcher_mu);
  UNREF_BY(fd, 2, reason); /* drop the reference */
}
Example 6
0
void grpc_fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done,
                    int *release_fd, const char *reason) {
  fd->on_done_closure = on_done;
  fd->released = release_fd != NULL;
  if (!fd->released) {
    shutdown(fd->fd, SHUT_RDWR);
  } else {
    *release_fd = fd->fd;
  }
  gpr_mu_lock(&fd->mu);
  REF_BY(fd, 1, reason); /* remove active status, but keep referenced */
  if (!has_watchers(fd)) {
    close_fd_locked(exec_ctx, fd);
  } else {
    wake_all_watchers_locked(fd);
  }
  gpr_mu_unlock(&fd->mu);
  UNREF_BY(fd, 2, reason); /* drop the reference */
}
Example 7
0
static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                      grpc_closure *on_done, int *release_fd,
                      const char *reason) {
  bool is_fd_closed = false;
  grpc_error *error = GRPC_ERROR_NONE;

  gpr_mu_lock(&fd->pollable.po.mu);
  gpr_mu_lock(&fd->orphaned_mu);
  fd->on_done_closure = on_done;

  /* If release_fd is not NULL, we should be relinquishing control of the file
     descriptor fd->fd (but we still own the grpc_fd structure). */
  if (release_fd != NULL) {
    *release_fd = fd->fd;
  } else {
    close(fd->fd);
    is_fd_closed = true;
  }

  fd->orphaned = true;

  if (!is_fd_closed) {
    gpr_log(GPR_DEBUG, "TODO: handle fd removal?");
  }

  /* Remove the active status but keep referenced. We want this grpc_fd struct
     to be alive (and not added to freelist) until the end of this function */
  REF_BY(fd, 1, reason);

  GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));

  gpr_mu_unlock(&fd->orphaned_mu);
  gpr_mu_unlock(&fd->pollable.po.mu);
  UNREF_BY(exec_ctx, fd, 2, reason); /* Drop the reference */
  GRPC_LOG_IF_ERROR("fd_orphan", GRPC_ERROR_REF(error));
  GRPC_ERROR_UNREF(error);
}
Example 8
0
static void unref_fd_no_longer_poller(grpc_exec_ctx *exec_ctx, void *arg,
                                      grpc_error *error) {
  grpc_fd *fd = arg;
  UNREF_BY(exec_ctx, fd, 2, "pollset_pollable");
}