Example No. 1
/* Called to initialise thread-specific state, the first time a thread needs
 * to use part of the per-thread state that requires explicit
 * initialisation.
 *
 * Some members of oo_per_thread are implicitly initialised to zero, either
 * because they are static data (if HAVE_CC__THREAD) or because the memory
 * is zeroed when allocated.  Those members must not be reinitialised here,
 * because they may already have been used and modified.
 */
static void __oo_per_thread_init_thread(struct oo_per_thread* pt)
{
  /* It's possible that we got here because we're not initialised at all! */
  if( citp.init_level < CITP_INIT_SYSCALLS ) {
    if( _citp_do_init_inprogress == 0 )
      citp_do_init(CITP_INIT_ALL);
    else
      citp_do_init(CITP_INIT_SYSCALLS);
  }

  /* [pt->sig] is zero initialised. */

  oo_stackname_thread_init(&pt->stackname);

  pt->spinstate = 0;
#if CI_CFG_UDP
  if( CITP_OPTS.udp_recv_spin )
    pt->spinstate |= (1 << ONLOAD_SPIN_UDP_RECV);
  if( CITP_OPTS.udp_send_spin )
    pt->spinstate |= (1 << ONLOAD_SPIN_UDP_SEND);
#endif
  if( CITP_OPTS.tcp_recv_spin )
    pt->spinstate |= (1 << ONLOAD_SPIN_TCP_RECV);
  if( CITP_OPTS.tcp_send_spin )
    pt->spinstate |= (1 << ONLOAD_SPIN_TCP_SEND);
  if( CITP_OPTS.tcp_accept_spin )
    pt->spinstate |= (1 << ONLOAD_SPIN_TCP_ACCEPT);
  if( CITP_OPTS.tcp_connect_spin )
    pt->spinstate |= (1 << ONLOAD_SPIN_TCP_CONNECT);
  if( CITP_OPTS.pkt_wait_spin )
    pt->spinstate |= (1 << ONLOAD_SPIN_PKT_WAIT);
#if CI_CFG_USERSPACE_PIPE
  if( CITP_OPTS.pipe_recv_spin )
    pt->spinstate |= (1 << ONLOAD_SPIN_PIPE_RECV);
  if( CITP_OPTS.pipe_send_spin )
    pt->spinstate |= (1 << ONLOAD_SPIN_PIPE_SEND);
#endif
  if( CITP_OPTS.ul_select_spin )
    pt->spinstate |= (1 << ONLOAD_SPIN_SELECT);
  if( CITP_OPTS.ul_poll_spin )
    pt->spinstate |= (1 << ONLOAD_SPIN_POLL);
#if CI_CFG_USERSPACE_EPOLL
  if( CITP_OPTS.ul_epoll_spin )
    pt->spinstate |= (1 << ONLOAD_SPIN_EPOLL_WAIT);
#endif
  if( CITP_OPTS.sock_lock_buzz )
    pt->spinstate |= (1 << ONLOAD_SPIN_SOCK_LOCK);
  if( CITP_OPTS.stack_lock_buzz )
    pt->spinstate |= (1 << ONLOAD_SPIN_STACK_LOCK);
  if( CITP_OPTS.so_busy_poll_spin )
    pt->spinstate |= (1 << ONLOAD_SPIN_SO_BUSY_POLL);
}
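
The spin configuration above packs one enable bit per blocking operation into pt->spinstate, indexed by the ONLOAD_SPIN_* values. A minimal sketch of how a caller might test those bits at runtime is shown below; oo_thread_may_spin() is a hypothetical helper name and not part of the listing above.

/* Illustrative only: non-zero if spinning is enabled in this thread's
 * per-thread state for the given ONLOAD_SPIN_* reason. */
static inline int oo_thread_may_spin(const struct oo_per_thread* pt,
                                     int spin_reason)
{
  return (pt->spinstate >> spin_reason) & 1u;
}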
Example No. 2
void _init(void)
{
  /* must not do any logging yet... */
  if( citp_do_init(CITP_INIT_ALL) < 0 )
    ci_fail(("EtherFabric transport library: failed to initialise (%d)",
             citp.init_level));

  Log_S(log("citp: initialisation done."));
}
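
_init() is the library's legacy ELF initialisation entry point, run by the dynamic loader when the shared object is loaded, and it must bring the whole library up before any intercepted call can be serviced. As an aside (not from the source), the same effect could be had with a GCC constructor attribute; onload_ctor below is an invented name.

/* Hypothetical equivalent using a constructor attribute instead of the
 * legacy _init entry point. */
static void __attribute__((constructor)) onload_ctor(void)
{
  if( citp_do_init(CITP_INIT_ALL) < 0 )
    ci_fail(("EtherFabric transport library: failed to initialise (%d)",
             citp.init_level));
}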
Example No. 3
/* Looks up the user-level 'FD info' for a given file descriptor.
** Returns a pointer to the 'FD info', or NULL if the FD is not
** user-level.
** NOTE: The reference count of the 'FD info' is incremented; the
**       caller must drop the reference by calling
**       citp_fdinfo_release_ref() when it is no longer needed.
*/
citp_fdinfo* citp_fdtable_lookup_noprobe(unsigned fd)
{
  /* We need to be initialised before we can grab the lock at the moment.
  ** TODO: make this more efficient by using a trylock to grab the fdtable
  ** lock, and on failure see whether we need to initialise it.
  */
  if( CI_UNLIKELY(citp.init_level < CITP_INIT_FDTABLE) ) {
    if( _citp_do_init_inprogress == 0 )
      CI_TRY(citp_do_init(CITP_INIT_ALL));
    else
      CI_TRY(citp_do_init(CITP_INIT_FDTABLE)); /* get what we need */

    return NULL;
  }

  if( fd < citp_fdtable.inited_count ) {

    volatile citp_fdinfo_p* p_fdip = &citp_fdtable.table[fd].fdip;
    citp_fdinfo_p fdip;

  again:
    /* Swap in the busy marker. */
    fdip = *p_fdip;
    if( fdip_is_normal(fdip) ) {
      if( fdip_cas_succeed(p_fdip, fdip, fdip_busy) ) {
	/* Bump the reference count. */
	citp_fdinfo* fdi = fdip_to_fdi(fdip);
	citp_fdinfo_ref(fdi);
	/* Swap the busy marker out again. */
	citp_fdtable_busy_clear(fd, fdip, 0);
        return fdi;
      }
      goto again;
    }
    /* Not normal! */
    else if( fdip_is_busy(fdip) ) {
      citp_fdtable_busy_wait(fd, 0);
      goto again;
    }

  }

  return NULL;
}
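
A minimal caller sketch for the lookup above, using only calls that appear in these listings; oo_use_fdinfo() is a hypothetical placeholder for whatever the caller actually does with the fdinfo.

/* Illustrative only: look the fd up without probing, use it if it is
 * managed at user level, then drop the reference taken by the lookup. */
static int example_lookup_noprobe_user(unsigned fd)
{
  citp_fdinfo* fdi = citp_fdtable_lookup_noprobe(fd);
  if( fdi == NULL )
    return -1;                        /* not user-level: caller falls back */
  oo_use_fdinfo(fdi);                 /* hypothetical use of the fdinfo */
  citp_fdinfo_release_ref(fdi, 0);    /* drop the ref taken by the lookup */
  return 0;
}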
Example No. 4
void _init(void)
{
  if( getpagesize() != CI_PAGE_SIZE )
    ci_fail(("Page size mismatch, expected %u, "
             "but the current value is %u",
             CI_PAGE_SIZE, getpagesize()));
  /* must not do any logging yet... */
  if( citp_do_init(CITP_INIT_ALL) < 0 )
    ci_fail(("EtherFabric transport library: failed to initialise (%d)",
             citp.init_level));

  Log_S(log("citp: initialisation done."));
}
Example No. 5
citp_fdinfo*
citp_fdtable_lookup_fast(citp_lib_context_t* ctx, unsigned fd)
{
  /* Note that if we haven't yet initialised this module, then
  ** [inited_count] will be zero, and the following test will fail.  So the
  ** test for initialisation is done further down...
  **
  ** This is highly performance critical.  DO NOT add any code between here
  ** and the first [return] statement.
  */
  citp_fdinfo* fdi;

  /* Try to avoid entering lib. */
  ctx->thread = NULL;

  if( fd < citp_fdtable.inited_count ) {
    volatile citp_fdinfo_p* p_fdip = &citp_fdtable.table[fd].fdip;
    citp_fdinfo_p fdip;

  again:
    fdip = *p_fdip;
    if( fdip_is_normal(fdip) ) {

      citp_enter_lib_if(ctx);
      if( citp_fdtable_is_mt_safe() ) {
	/* No need to use atomic ops or add a ref to the fdi when MT-safe.
         * The definition of "fds_mt_safe" is that the app does not change
         * the meaning of a file descriptor in one thread when it is being
         * used in another thread.
         */
        fdi = fdip_to_fdi(fdip);
        if( ! citp_fdinfo_is_consistent(fdi) )
          fdi = citp_reprobe_moved(fdi, CI_TRUE, CI_FALSE);

	return fdi;
      }
      else {
        /* Swap in the busy marker. */
	if( fdip_cas_succeed(p_fdip, fdip, fdip_busy) ) {
	  fdi = fdip_to_fdi(fdip);

	  ci_assert(fdi);
	  ci_assert_gt(oo_atomic_read(&fdi->ref_count), 0);
	  ci_assert(fdip_is_closing(fdip) || fdip_is_reserved(fdip) ||
		    fdi->fd == fd);
	  /* Bump the reference count. */
	  citp_fdinfo_ref(fdi);

          if( ! citp_fdinfo_is_consistent(fdi) )
            fdi = citp_reprobe_moved(fdi, CI_FALSE, CI_TRUE);
          else {
            /* Swap the busy marker out again. */
            citp_fdtable_busy_clear(fd, fdip, 0);
          }
	  return fdi;
	}
	goto again;
      }
    }

    /* Not normal! */
    if( fdip_is_passthru(fdip) )
      return NULL;

    citp_enter_lib_if(ctx);
    if( fdip_is_busy(fdip) ) {
      citp_fdtable_busy_wait(fd, 0);
      goto again;
    }

    ci_assert(fdip_is_unknown(fdip));
    goto probe;
  }

  if( citp.init_level < CITP_INIT_FDTABLE ) {
    if( _citp_do_init_inprogress == 0 )
      CI_TRY(citp_do_init(CITP_INIT_ALL));
    else
      CI_TRY(citp_do_init(CITP_INIT_FDTABLE)); /* get what we need */
  }

  if( fd >= citp_fdtable.size )
    return NULL;

 probe:
  citp_enter_lib_if(ctx);
  fdi = citp_fdtable_probe(fd);
  if( fdi && citp_fdtable_is_mt_safe() )
    citp_fdinfo_release_ref(fdi, 0);
  return fdi;
}
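
A hedged caller sketch for the fast lookup: the reference rules depend on citp_fdtable_is_mt_safe(), since in the MT-safe case no reference is taken on the returned fdinfo. Real callers also balance the citp_enter_lib_if() done inside the lookup via ctx; that pairing is omitted here.

/* Illustrative only: fast-path lookup followed by conditional release of
 * the reference, mirroring the rules in citp_fdtable_lookup_fast(). */
static int example_lookup_fast_user(unsigned fd)
{
  citp_lib_context_t ctx;
  citp_fdinfo* fdi = citp_fdtable_lookup_fast(&ctx, fd);
  if( fdi == NULL )
    return -1;                          /* not user-level: pass through */
  /* ... use fdi ... */
  if( ! citp_fdtable_is_mt_safe() )
    citp_fdinfo_release_ref(fdi, 0);    /* drop the ref taken by the lookup */
  return 0;
}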
Example No. 6
citp_fdinfo *
citp_fdtable_lookup(unsigned fd)
{
  /* Note that if we haven't yet initialised this module, then
  ** [inited_count] will be zero, and the following test will fail.  So the
  ** test for initialisation is done further down...
  **
  ** This is highly performance critical.  DO NOT add any code between here
  ** and the first [return] statement.
  */
  citp_fdinfo* fdi;

  /* In some cases, we'll lock fdtable.  Assert that it is possible: */
  ci_assert(oo_per_thread_get()->sig.inside_lib);

  if( fd < citp_fdtable.inited_count ) {

    volatile citp_fdinfo_p* p_fdip = &citp_fdtable.table[fd].fdip;
    citp_fdinfo_p fdip;

  again:
    /* Swap in the busy marker. */
    fdip = *p_fdip;

    if( fdip_is_normal(fdip) ) {
      if( citp_fdtable_not_mt_safe() ) {
	if( fdip_cas_succeed(p_fdip, fdip, fdip_busy) ) {
	  fdi = fdip_to_fdi(fdip);
	  ci_assert(fdi);
	  ci_assert_gt(oo_atomic_read(&fdi->ref_count), 0);
	  ci_assert(fdip_is_closing(fdip) || fdip_is_reserved(fdip) ||
		    fdi->fd == fd);
	  /* Bump the reference count. */
	  citp_fdinfo_ref(fdi);

          if( ! citp_fdinfo_is_consistent(fdi) ) {
            /* Something is wrong.  Re-probe. */
            fdi = citp_reprobe_moved(fdi, CI_FALSE, CI_TRUE);
          }
          else {
            /* Swap the busy marker out again. */
            citp_fdtable_busy_clear(fd, fdip, 0);
          }
	  return fdi;
	}
	goto again;
      }
      else {
	/* No need to use atomic ops when single-threaded.  The definition
         * of "fds_mt_safe" is that the app does not change the meaning of
         * a file descriptor in one thread when it is being used in another
         * thread.  In that case I'm hoping this should be safe, but at
         * time of writing I'm really not confident.  (FIXME).
         */
	fdi = fdip_to_fdi(fdip);
        if( ci_is_multithreaded() )
	  citp_fdinfo_ref(fdi);
        else
          ++fdi->ref_count.n;

        if( ! citp_fdinfo_is_consistent(fdi) )
          fdi = citp_reprobe_moved(fdi, CI_FALSE, CI_FALSE);

	return fdi;
      }
    }

    /* Not normal! */
    if( fdip_is_passthru(fdip) )  return NULL;

    if( fdip_is_busy(fdip) ) {
      citp_fdtable_busy_wait(fd, 0);
      goto again;
    }

    ci_assert(fdip_is_unknown(fdip));
    goto probe;
  }

  if( citp.init_level < CITP_INIT_FDTABLE ) {
    if( _citp_do_init_inprogress == 0 )
      CI_TRY(citp_do_init(CITP_INIT_ALL));
    else
      CI_TRY(citp_do_init(CITP_INIT_FDTABLE)); /* get what we need */
  }

  if( fd >= citp_fdtable.size )  return NULL;

 probe:
  fdi = citp_fdtable_probe(fd);

  return fdi;
}
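
Both lookup variants rely on the same busy-marker protocol on a table slot: claim it with a compare-and-swap, take the reference, then publish the original value back. The sketch below distils that pattern using only calls that appear in the listings above; it is illustrative rather than an additional entry point of the library.

/* Illustrative distillation of the claim/restore protocol. */
static citp_fdinfo* example_claim_ref(unsigned fd)
{
  volatile citp_fdinfo_p* p_fdip = &citp_fdtable.table[fd].fdip;
  citp_fdinfo_p fdip;

 again:
  fdip = *p_fdip;
  if( fdip_is_busy(fdip) ) {
    citp_fdtable_busy_wait(fd, 0);        /* another thread holds the slot */
    goto again;
  }
  if( ! fdip_is_normal(fdip) )
    return NULL;                          /* passthru/unknown: not handled here */
  if( ! fdip_cas_succeed(p_fdip, fdip, fdip_busy) )
    goto again;                           /* lost the race: retry */
  citp_fdinfo_ref(fdip_to_fdi(fdip));     /* bump the ref while the slot is ours */
  citp_fdtable_busy_clear(fd, fdip, 0);   /* restore the original fdip value */
  return fdip_to_fdi(fdip);
}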