Example #1
static void citp_epoll_dtor(citp_fdinfo* fdi, int fdt_locked)
{
  struct citp_epoll_fd* ep = fdi_to_epoll(fdi);

  /* Only the caller that drops the last reference performs the teardown. */
  if (!oo_atomic_dec_and_test(&ep->refcount))
    return;

  CITP_FDTABLE_LOCK();
  /* Close the epoll fd exported via the driver's shared mapping, release
   * its reserved slot in the fdtable, then unmap the shared page. */
  ci_tcp_helper_close_no_trampoline(ep->shared->epfd);
  __citp_fdtable_reserve(ep->shared->epfd, 0);
  munmap(ep->shared, sizeof(*ep->shared));

  /* Likewise close and unreserve the OO_EPOLL_DEV driver fd. */
  ci_tcp_helper_close_no_trampoline(ep->epfd_os);
  __citp_fdtable_reserve(ep->epfd_os, 0);
  CITP_FDTABLE_UNLOCK();

  CI_FREE_OBJ(ep);
}
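
The guard at the top of citp_epoll_dtor() is the classic decrement-and-test idiom: every owner calls the destructor, but only the one that takes the count to zero tears the object down. Below is a minimal, self-contained sketch of the same idiom using C11 atomics; shared_ep, ep_get() and ep_put() are hypothetical names, not part of Onload.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical shared object, torn down only when the last reference goes. */
struct shared_ep {
  atomic_int refcount;
};

static struct shared_ep* ep_get(struct shared_ep* ep)
{
  atomic_fetch_add_explicit(&ep->refcount, 1, memory_order_relaxed);
  return ep;
}

static void ep_put(struct shared_ep* ep)
{
  /* Same shape as oo_atomic_dec_and_test(): only the caller that takes the
   * count from 1 to 0 performs the cleanup, so it runs exactly once. */
  if( atomic_fetch_sub_explicit(&ep->refcount, 1, memory_order_acq_rel) != 1 )
    return;
  printf("last reference dropped: freeing\n");
  free(ep);
}

int main(void)
{
  struct shared_ep* ep = malloc(sizeof(*ep));
  atomic_init(&ep->refcount, 1);
  ep_get(ep);    /* second owner appears       */
  ep_put(ep);    /* count 2 -> 1: no teardown  */
  ep_put(ep);    /* count 1 -> 0: teardown     */
  return 0;
}
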
Example #2
static void citp_passthrough_dtor(citp_fdinfo* fdi, int fdt_locked)
{
  citp_alien_fdi* epi = fdi_to_alien_fdi(fdi);

  CITP_FDTABLE_LOCK();
  /* Close the OS socket fd and release its reserved fdtable slot while
   * holding the fdtable lock. */
  ci_tcp_helper_close_no_trampoline(epi->os_socket);
  __citp_fdtable_reserve(epi->os_socket, 0);
  CITP_FDTABLE_UNLOCK();
  /* Drop the netif reference held by this fdinfo. */
  citp_netif_release_ref(epi->netif, fdt_locked);
}
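
Both destructors close descriptors only while CITP_FDTABLE_LOCK() is held, so a concurrent dup()/dup2() cannot race with the teardown and recycle the fd number mid-flight. A rough pthreads analogue of that pattern is sketched below; fdtable_lock and release_tracked_fd() are hypothetical stand-ins, not Onload APIs.

#include <fcntl.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical analogue of CITP_FDTABLE_LOCK(): one lock serialises every
 * operation that closes or re-purposes a descriptor number we track. */
static pthread_mutex_t fdtable_lock = PTHREAD_MUTEX_INITIALIZER;

static void release_tracked_fd(int* fd)
{
  pthread_mutex_lock(&fdtable_lock);
  if( *fd >= 0 ) {
    close(*fd);   /* hand the number back to the kernel        */
    *fd = -1;     /* and forget it so it is never closed twice */
  }
  pthread_mutex_unlock(&fdtable_lock);
}

int main(void)
{
  int fd = open("/dev/null", O_RDONLY);
  release_tracked_fd(&fd);
  printf("descriptor released, slot now %d\n", fd);
  return 0;
}
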
Example #3
void citp_log_change_fd(void)
{
  int newfd, prev;
  /* We need to change the logging fd, probably because someone wants to do
  ** a dup2() onto it.
  **
  ** No need to set 'close-on-exec' (FD_CLOEXEC) separately on the new fd:
  ** oo_fcntl_dupfd_cloexec() duplicates with the flag already set.
  */
  CITP_FDTABLE_LOCK();
  prev = citp.log_fd;
  newfd = oo_fcntl_dupfd_cloexec(prev, 3);
  if( newfd >= 0 ) {
    __citp_fdtable_reserve(newfd, 1);
    citp.log_fd = newfd;
  }
  Log_S(log("%s: old=%d new=%d", __FUNCTION__, prev, newfd));
  /* The old fd number is given up unconditionally: the caller wants it free. */
  __citp_fdtable_reserve(prev, 0);
  ci_sys_close(prev);
  CITP_FDTABLE_UNLOCK();
}
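
The heavy lifting in citp_log_change_fd() is the duplicate-above-a-floor-with-cloexec step. The sketch below shows the same move with nothing but standard fcntl(); log_fd and log_change_fd() are hypothetical stand-ins for citp.log_fd and the Onload helper, and oo_fcntl_dupfd_cloexec() is assumed to wrap the F_DUPFD_CLOEXEC facility used here.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int log_fd = 2;   /* hypothetical stand-in for citp.log_fd */

static void log_change_fd(void)
{
  int prev = log_fd;
  /* Duplicate onto the lowest free descriptor >= 3 with close-on-exec set
   * atomically, so the old number becomes free for the caller's dup2(). */
  int newfd = fcntl(prev, F_DUPFD_CLOEXEC, 3);
  if( newfd >= 0 )
    log_fd = newfd;
  fprintf(stderr, "log fd: old=%d new=%d\n", prev, newfd);
  /* The old number is given up regardless, as in the function above. */
  close(prev);
}

int main(void)
{
  log_change_fd();
  dprintf(log_fd, "logging continues on fd %d\n", log_fd);
  return 0;
}
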
Example #4
void citp_passthrough_init(citp_alien_fdi* epi)
{
  int rc = oo_os_sock_get(epi->netif, epi->ep->bufid, &epi->os_socket);

  /* No sensible way to handle oo_os_sock_get failure.  Just record it. */
  if( rc != 0 ) {
    Log_U(ci_log("%s: oo_os_sock_get([%d:%d]) returned %d",
                 __func__, NI_ID(epi->netif), epi->ep->bufid, rc));
    epi->os_socket = -1;
    return;
  }
  __citp_fdtable_reserve(epi->os_socket, 1);
  /* oo_os_sock_get() takes the citp_dup2_lock read lock: release it here. */
  oo_rwlock_unlock_read(&citp_dup2_lock);
}
Example #5
int citp_epoll_create(int size, int flags)
{
  citp_fdinfo    *fdi;
  citp_epoll_fdi *epi;
  struct citp_epoll_fd* ep;
  int            fd;

  if( (epi = CI_ALLOC_OBJ(citp_epoll_fdi)) == NULL )
    goto fail0;
  if( (ep = CI_ALLOC_OBJ(struct citp_epoll_fd)) == NULL )
    goto fail1;
  fdi = &epi->fdinfo;
  citp_fdinfo_init(fdi, &citp_epoll_protocol_impl);

  /* Create the epoll fd. */
  CITP_FDTABLE_LOCK();
  if( (fd = ci_sys_epoll_create_compat(size, flags, 0)) < 0 )
    goto fail2;
  citp_fdtable_new_fd_set(fd, fdip_busy, TRUE);

  /* Init epfd_os */
#ifdef O_CLOEXEC
  ep->epfd_os = ci_sys_open(OO_EPOLL_DEV, O_RDWR | O_CLOEXEC);
#else
  ep->epfd_os = ci_sys_open(OO_EPOLL_DEV, O_RDWR);
  if( ep->epfd_os >= 0 )
    ci_sys_fcntl(ep->epfd_os, F_SETFD, FD_CLOEXEC);
#endif
  if( ep->epfd_os < 0 ) {
    Log_E(ci_log("%s: ERROR: failed to open(%s) errno=%d",
                 __FUNCTION__, OO_EPOLL_DEV, errno));
    goto fail3;
  }
  __citp_fdtable_reserve(ep->epfd_os, 1);
  /* Map the driver's read-only shared state; it exports the kernel epoll
   * fd, which must also be reserved in the user-level fdtable. */
  ep->shared = mmap(NULL, sizeof(*ep->shared), PROT_READ, MAP_SHARED,
                    ep->epfd_os, 0);
  if( ep->shared == MAP_FAILED ) {
    Log_E(ci_log("%s: ERROR: failed to mmap shared segment errno=%d",
                 __FUNCTION__, errno));
    goto fail4;
  }
  __citp_fdtable_reserve(ep->shared->epfd, 1);
  CITP_FDTABLE_UNLOCK();

  epi->epoll = ep;
  ep->size = size;
  oo_wqlock_init(&ep->lock);
  ep->not_mt_safe = ! CITP_OPTS.ul_epoll_mt_safe;
  ci_dllist_init(&ep->oo_sockets);
  ep->oo_sockets_n = 0;
  ci_dllist_init(&ep->dead_sockets);
  oo_atomic_set(&ep->refcount, 1);
  ep->epfd_syncs_needed = 0;
  ep->blocking = 0;
  citp_fdtable_insert(fdi, fd, 0);
  Log_POLL(ci_log("%s: fd=%d driver_fd=%d epfd=%d", __FUNCTION__,
                  fd, ep->epfd_os, (int) ep->shared->epfd));
  return fd;

 fail4:
  __citp_fdtable_reserve(ep->epfd_os, 0);
  ci_sys_close(ep->epfd_os);
 fail3:
  ci_sys_close(fd);
  citp_fdtable_busy_clear(fd, fdip_unknown, 1);
 fail2:
  CITP_FDTABLE_UNLOCK();
  CI_FREE_OBJ(ep);
 fail1:
  CI_FREE_OBJ(epi);
 fail0:
  return -1;
}
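
The O_CLOEXEC/F_SETFD dance around epfd_os above is a portability pattern: set close-on-exec atomically when the kernel supports it, otherwise fall back to fcntl() and accept the small fork/exec window. A stripped-down version using only standard Linux epoll calls is sketched below; epoll_create_cloexec() is a hypothetical helper, not part of the Onload API.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/epoll.h>
#include <unistd.h>

/* Prefer the race-free creation flag; fall back to setting FD_CLOEXEC
 * afterwards on kernels/headers that lack it. */
static int epoll_create_cloexec(int size)
{
#ifdef EPOLL_CLOEXEC
  (void) size;
  return epoll_create1(EPOLL_CLOEXEC);
#else
  int fd = epoll_create(size);
  if( fd >= 0 )
    fcntl(fd, F_SETFD, FD_CLOEXEC);
  return fd;
#endif
}

int main(void)
{
  int fd = epoll_create_cloexec(10);
  if( fd < 0 ) {
    fprintf(stderr, "epoll create failed: %s\n", strerror(errno));
    return 1;
  }
  printf("epoll fd=%d (close-on-exec set)\n", fd);
  close(fd);
  return 0;
}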