Code Example #1
static void
efab_eplock_record_pid(ci_netif *ni)
{
    ci_irqlock_state_t lock_flags;
    int i;
    int index = -1;

    int pid = current->pid;

    ci_irqlock_lock(&ni->eplock_helper.pids_lock, &lock_flags);
    for (i=0; i < EFAB_EPLOCK_MAX_NO_PIDS; i++) {

        if (ni->eplock_helper.pids_who_waited[i] == pid) {
            ni->eplock_helper.pids_no_waits[i]++;
            ci_irqlock_unlock(&ni->eplock_helper.pids_lock, &lock_flags);

            return;
        }

        if ((ni->eplock_helper.pids_who_waited[i] == 0) && (index < 0)) {
            index = i;
        }
    }
    if (index >= 0) {
        ni->eplock_helper.pids_who_waited[index] = pid;
        ni->eplock_helper.pids_no_waits[index] = 1;
    }
    ci_irqlock_unlock(&ni->eplock_helper.pids_lock, &lock_flags);
}
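
All of the examples on this page share the same ci_irqlock discipline: ci_irqlock_lock() and ci_irqlock_unlock() bracket a short critical section, and every early return unlocks first. As a minimal user-space sketch of example #1's record-or-insert scan, with pthread_mutex_t standing in for the driver's irqlock (MAX_PIDS and every other name below is illustrative, not OpenOnload's):

#include <pthread.h>

#define MAX_PIDS 16                 /* stand-in for EFAB_EPLOCK_MAX_NO_PIDS */

struct pid_table {
    pthread_mutex_t lock;           /* plays the role of pids_lock */
    int pids[MAX_PIDS];             /* 0 marks a free slot */
    unsigned waits[MAX_PIDS];
};

/* Count one wait for [pid]: bump its slot if present, else claim the
 * first free slot seen during the scan.  Unlocks on every exit path. */
static void record_pid(struct pid_table *t, int pid)
{
    int i, free_slot = -1;

    pthread_mutex_lock(&t->lock);
    for (i = 0; i < MAX_PIDS; i++) {
        if (t->pids[i] == pid) {
            t->waits[i]++;
            pthread_mutex_unlock(&t->lock);
            return;
        }
        if (t->pids[i] == 0 && free_slot < 0)
            free_slot = i;
    }
    if (free_slot >= 0) {           /* not found: claim a slot if any */
        t->pids[free_slot] = pid;
        t->waits[free_slot] = 1;
    }
    pthread_mutex_unlock(&t->lock);
}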
Code Example #2
File: id_pool.c  Project: majek/openonload
int
ci_id_pool_free(ci_id_pool_t* idp, unsigned id, ci_irqlock_t* lock)
{
  ci_irqlock_state_t lock_flags;
  unsigned current_size;
  int rc;

  while( 1 ) {
    ci_irqlock_lock(lock, &lock_flags);

    if( !ci_fifo2_is_full(&idp->free_ids) ) {
      ci_fifo2_put(&idp->free_ids, (int) id);
      ci_irqlock_unlock(lock, &lock_flags);
      return 0;
    }

    current_size = ci_fifo2_buf_size(&idp->free_ids);
    ci_irqlock_unlock(lock, &lock_flags);

    if (ci_in_atomic()) {
      ci_log("ci_id_pool: ci_in_atomic in ci_id_pool_free()");
      return -ENOMEM;
    }

    ci_fifo2_grow_lock_a(&idp->free_ids, current_size, lock, ci_vmalloc_fn,
                         ci_vfree, &rc);
    if( rc < 0 )  return rc;
  }
}
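
Example #2 is the classic grow-and-retry shape: the allocation (ci_fifo2_grow_lock_a() with ci_vmalloc_fn) may sleep, so it runs with the lock dropped, and the loop re-checks for space afterwards because another thread may have grown the FIFO first. A user-space sketch of the same shape, under the assumption of a simple array-backed pool (all names illustrative):

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct id_pool {
    pthread_mutex_t lock;
    int *ids;                       /* stack of free ids */
    size_t count, capacity;
};

/* Return [id] to the pool; if full, grow with the lock dropped and retry. */
static int id_pool_free(struct id_pool *p, int id)
{
    for (;;) {
        size_t old_cap, new_cap;
        int *bigger;

        pthread_mutex_lock(&p->lock);
        if (p->count < p->capacity) {
            p->ids[p->count++] = id;
            pthread_mutex_unlock(&p->lock);
            return 0;
        }
        old_cap = p->capacity;
        pthread_mutex_unlock(&p->lock);

        /* Allocate unlocked: this may sleep, and the pool may change
         * underneath us, so the swap below is conditional. */
        new_cap = old_cap ? old_cap * 2 : 8;
        bigger = malloc(new_cap * sizeof(*bigger));
        if (bigger == NULL)
            return -ENOMEM;

        pthread_mutex_lock(&p->lock);
        if (p->capacity == old_cap) {      /* we won the race to grow */
            memcpy(bigger, p->ids, p->count * sizeof(*bigger));
            free(p->ids);
            p->ids = bigger;
            p->capacity = new_cap;
            bigger = NULL;
        }
        pthread_mutex_unlock(&p->lock);
        free(bigger);                      /* no-op if our buffer was used */
    }
}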
Code Example #3
/* Look for a suitable stack within the cluster.
 *
 * You need to efab_thr_release() the stack returned by this function
 * when done.
 *
 * You must hold the thc_mutex before calling this function.
 *
 * You cannot hold the THR_TABLE.lock when calling this function.
 */
static int thc_get_thr(tcp_helper_cluster_t* thc,
                       struct oof_socket* oofilter,
                       tcp_helper_resource_t** thr_out)
{
  tcp_helper_resource_t* thr_walk;
  ci_irqlock_state_t lock_flags;
  struct oof_manager* fm = efab_tcp_driver.filter_manager;
  /* Search for a suitable stack within the thc.  A suitable stack has
   * the same tid as current, and our filter can be associated with it;
   * in other words, it does not already have a socket filter installed
   * (dummy or not) with the same protocol:port_be16[:addr_be32].
   */
  /* Iterating over list of stacks, make sure they don't change. */
  ci_irqlock_lock(&THR_TABLE.lock, &lock_flags);
  thr_walk = thc->thc_thr_head;
  while( thr_walk != NULL ) {
    if( thr_walk->thc_tid == current->pid ) {
      if( oof_socket_can_update_stack(fm, oofilter, thr_walk) ) {
        efab_thr_ref(thr_walk);
        *thr_out = thr_walk;
        ci_irqlock_unlock(&THR_TABLE.lock, &lock_flags);
        return 0;
      }
    }
    thr_walk = thr_walk->thc_thr_next;
  }
  ci_irqlock_unlock(&THR_TABLE.lock, &lock_flags);
  return 1;
}
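
Example #3 (and example #7, which walks the same list) shows why the reference is taken before the lock is dropped: THR_TABLE.lock only keeps the list stable while held, so efab_thr_ref() must pin the chosen stack inside the critical section. A sketch of that find-and-pin idiom with an atomic refcount (the types here are illustrative, not OpenOnload's):

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct stack {
    atomic_int refcount;
    int tid;                        /* owning thread id */
    struct stack *next;
};

struct stack_list {
    pthread_mutex_t lock;           /* stabilises the list while held */
    struct stack *head;
};

/* Find the stack owned by [tid].  The reference is taken *inside* the
 * critical section; once we unlock, the list may change, but *out stays
 * valid until the caller releases it.  Returns 0 on success, 1 if no
 * stack matched, mirroring thc_get_thr()'s convention. */
static int find_stack(struct stack_list *l, int tid, struct stack **out)
{
    struct stack *walk;

    pthread_mutex_lock(&l->lock);
    for (walk = l->head; walk != NULL; walk = walk->next) {
        if (walk->tid == tid) {
            atomic_fetch_add(&walk->refcount, 1);
            *out = walk;
            pthread_mutex_unlock(&l->lock);
            return 0;
        }
    }
    pthread_mutex_unlock(&l->lock);
    return 1;
}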
Code Example #4
File: ipid.c  Project: ido/openonload
extern int efab_ipid_alloc(efab_ipid_cb_t* ipid)
{
  int i;
  int rv;
  ci_irqlock_state_t lock_flags;

  ci_assert( ipid->init == EFAB_IPID_INIT );
  ci_irqlock_lock( &ipid->lock, &lock_flags );

  /* go find an unused block */
  for( i = 0; i < CI_IPID_BLOCK_COUNT; i++ ) {
    if( !ipid->range[i] ) {
      ipid->range[i]++;
      rv = CI_IPID_MIN + (i << CI_IPID_BLOCK_SHIFT);
      ci_assert((rv >= CI_IPID_MIN) &&
                (rv <= CI_IPID_MAX - CI_IPID_BLOCK_LENGTH + 1));
      goto alloc_exit;
    } else {
      ci_assert( ipid->range[i] == 1 );
    }
  }
  /* !!Out of blocks!! */
  rv = -ENOMEM;

 alloc_exit:
  ci_irqlock_unlock( &ipid->lock, &lock_flags );
  return rv;
}
Code Example #5
File: waitable.c  Project: bmschwa/openonload
static void ci_drop_orphan(ci_netif * ni)
{
  ci_irqlock_state_t lock_flags;
  tcp_helper_resource_t* trs;
  int dec_needed; 

  /* Called when connection closes AFTER the file descriptor closes
   *  - in kernel mode, if user mode has gone away, we call
   *    efab_tcp_helper_k_ref_count_dec() to decrement count
   *    of such connections so we can free the stack when
   *    they've all gone away.
   */
  if( ni->flags & CI_NETIF_FLAGS_DROP_SOCK_REFS ) {
    trs = netif2tcp_helper_resource(ni);
    dec_needed = 0;

    ci_irqlock_lock(&trs->lock, &lock_flags);
    if( trs->n_ep_closing_refs > 0 ) {
      --trs->n_ep_closing_refs;
      dec_needed = 1;
    }
    ci_irqlock_unlock(&trs->lock, &lock_flags);

    if( dec_needed )
      efab_tcp_helper_k_ref_count_dec(trs, 0);
  }
}
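
Example #5 records its decision under the lock (dec_needed) but performs the decrement after unlocking, since efab_tcp_helper_k_ref_count_dec() may trigger heavyweight teardown that should not run inside an irq-disabled section. The same defer-the-side-effect shape in miniature (hypothetical names):

#include <pthread.h>

struct resource {
    pthread_mutex_t lock;
    int closing_refs;
};

/* Stand-in for efab_tcp_helper_k_ref_count_dec(): may block or free. */
void resource_release(struct resource *r);

/* Decide under the lock; act outside it. */
static void drop_closing_ref(struct resource *r)
{
    int dec_needed = 0;

    pthread_mutex_lock(&r->lock);
    if (r->closing_refs > 0) {
        --r->closing_refs;
        dec_needed = 1;
    }
    pthread_mutex_unlock(&r->lock);

    if (dec_needed)
        resource_release(r);        /* safe: no locks held here */
}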
Code Example #6
static void thc_dump_fn(void* not_used, oo_dump_log_fn_t log, void* log_arg)
{
  ci_irqlock_state_t lock_flags;
  tcp_helper_cluster_t* walk;
  int cnt = 0;

  /* Iterating over list of stacks, make sure they don't change. */
  mutex_lock(&thc_mutex);
  ci_irqlock_lock(&THR_TABLE.lock, &lock_flags);

  walk = thc_head;
  while( walk != NULL ) {
    int hwports = 0;
    int i;
    for( i = 0; i < CI_CFG_MAX_REGISTER_INTERFACES; ++i )
      if( walk->thc_vi_set[i] != NULL )
        hwports |= (1 << i);
    log(log_arg, "--------------------------------------------------------");
    log(log_arg, "%d: name=%s  size=%d  euid=%d flags=%d hwports=0x%x", cnt++,
        walk->thc_name, walk->thc_cluster_size, walk->thc_euid,
        walk->thc_flags, hwports);
    thc_dump_thrs(walk, log, log_arg);
    walk = walk->thc_next;
  }

  ci_irqlock_unlock(&THR_TABLE.lock, &lock_flags);
  mutex_unlock(&thc_mutex);
}
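
Example #6 nests two locks: the sleeping thc_mutex is taken first, then the spinning THR_TABLE.lock, and they are released in reverse order. Any other path that needs both must take them in the same order to avoid deadlock. A minimal sketch of that ordering (illustrative names; table_lock must be pthread_spin_init()'d before use):

#include <pthread.h>

static pthread_mutex_t list_mutex = PTHREAD_MUTEX_INITIALIZER; /* thc_mutex role */
static pthread_spinlock_t table_lock;                  /* THR_TABLE.lock role */

/* Take mutex before spinlock, release in reverse; every path that
 * needs both locks must use this same order. */
static void dump_all(void (*emit)(const char *))
{
    pthread_mutex_lock(&list_mutex);
    pthread_spin_lock(&table_lock);

    emit("...walk the cluster list here...");

    pthread_spin_unlock(&table_lock);
    pthread_mutex_unlock(&list_mutex);
}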
Code Example #7
/* If the thc has any orphan stacks, return one of them. */
static int thc_get_an_orphan(tcp_helper_cluster_t* thc,
                             tcp_helper_resource_t** thr_out)
{
  tcp_helper_resource_t* thr_walk;
  int rc = -1;
  ci_irqlock_state_t lock_flags;
  /* Iterating over list of stacks, make sure they don't change. */
  ci_irqlock_lock(&THR_TABLE.lock, &lock_flags);
  thr_walk = thc->thc_thr_head;
  while( thr_walk ) {
    if( thr_walk->k_ref_count & TCP_HELPER_K_RC_NO_USERLAND ) {
      rc = 0;
      *thr_out = thr_walk;
      break;
    }
    thr_walk = thr_walk->thc_thr_next;
  }
  ci_irqlock_unlock(&THR_TABLE.lock, &lock_flags);
  return rc;
}
Code Example #8
File: ipid.c  Project: ido/openonload
int
efab_ipid_free(efab_ipid_cb_t* ipid, int base )
{
  int i;
  ci_irqlock_state_t lock_flags;

  ci_assert( ipid->init == EFAB_IPID_INIT );

  if(  (base & CI_IPID_BLOCK_MASK) != 0 )
    return -EINVAL;  /* not actually on a block boundary */

  ci_assert((base >= CI_IPID_MIN) && 
            (base <= CI_IPID_MAX - CI_IPID_BLOCK_LENGTH + 1));

  ci_irqlock_lock( &ipid->lock, &lock_flags );
  i = (base - CI_IPID_MIN) >> CI_IPID_BLOCK_SHIFT;
  ci_assert( ipid->range[i] == 1 );
  ipid->range[i] = 0;
  ci_irqlock_unlock( &ipid->lock, &lock_flags );
  return 0;
}
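
Examples #4 and #8 are the two halves of one block allocator: range[] holds one entry per block, 0 meaning free and 1 meaning taken, and a block index converts to a base id via a shift. A self-contained sketch of the pair, with made-up constants standing in for the CI_IPID_* values:

#include <errno.h>
#include <pthread.h>

#define ID_MIN        0x8000        /* stand-in for CI_IPID_MIN */
#define BLOCK_SHIFT   7             /* stand-in for CI_IPID_BLOCK_SHIFT */
#define BLOCK_LENGTH  (1 << BLOCK_SHIFT)
#define BLOCK_MASK    (BLOCK_LENGTH - 1)
#define BLOCK_COUNT   64            /* stand-in for CI_IPID_BLOCK_COUNT */

static pthread_mutex_t ipid_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char range[BLOCK_COUNT];   /* 0 = free, 1 = allocated */

/* Claim the first free block; return its base id, or -ENOMEM. */
static int ipid_alloc(void)
{
    int i, rv = -ENOMEM;

    pthread_mutex_lock(&ipid_lock);
    for (i = 0; i < BLOCK_COUNT; i++) {
        if (!range[i]) {
            range[i] = 1;
            rv = ID_MIN + (i << BLOCK_SHIFT);
            break;
        }
    }
    pthread_mutex_unlock(&ipid_lock);
    return rv;
}

/* Release a block by its base id; reject ids off a block boundary. */
static int ipid_free(int base)
{
    int i;

    if ((base & BLOCK_MASK) != 0)
        return -EINVAL;
    i = (base - ID_MIN) >> BLOCK_SHIFT;
    if (i < 0 || i >= BLOCK_COUNT)
        return -EINVAL;

    pthread_mutex_lock(&ipid_lock);
    range[i] = 0;
    pthread_mutex_unlock(&ipid_lock);
    return 0;
}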
Code Example #9
File: tcp_helper_ioctl.c  Project: ido/openonload
static int
efab_tcp_helper_sock_attach(ci_private_t* priv, void *arg)
{
  oo_sock_attach_t* op = arg;
  tcp_helper_resource_t* trs = priv->thr;
  tcp_helper_endpoint_t* ep = NULL;
  citp_waitable_obj *wo;
  int rc, flags, type = op->type;

/* SOCK_CLOEXEC and SOCK_NONBLOCK both exist since Linux 2.6.27 */
#ifdef SOCK_TYPE_MASK
  BUILD_BUG_ON(SOCK_CLOEXEC != O_CLOEXEC);
  flags = type & (SOCK_CLOEXEC | SOCK_NONBLOCK);
  type &= SOCK_TYPE_MASK;
# ifdef SOCK_NONBLOCK
    if( SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK) )
      flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
# endif
#else
  flags = 0;
#endif

  OO_DEBUG_TCPH(ci_log("%s: ep_id=%d", __FUNCTION__, op->ep_id));
  if( trs == NULL ) {
    LOG_E(ci_log("%s: ERROR: not attached to a stack", __FUNCTION__));
    return -EINVAL;
  }

  /* Validate and find the endpoint. */
  if( ! IS_VALID_SOCK_P(&trs->netif, op->ep_id) )
    return -EINVAL;
  ep = ci_trs_get_valid_ep(trs, op->ep_id);
  if( tcp_helper_endpoint_set_aflags(ep, OO_THR_EP_AFLAG_ATTACHED) &
      OO_THR_EP_AFLAG_ATTACHED )
    return -EBUSY;
  wo = SP_TO_WAITABLE_OBJ(&trs->netif, ep->id);

  /* create OS socket */
  if( op->domain != AF_UNSPEC ) {
    struct socket *sock;
    struct file *os_file;

    rc = sock_create(op->domain, type, 0, &sock);
    if( rc < 0 ) {
      LOG_E(ci_log("%s: ERROR: sock_create(%d, %d, 0) failed (%d)",
                   __FUNCTION__, op->domain, type, rc));
      tcp_helper_endpoint_clear_aflags(ep, OO_THR_EP_AFLAG_ATTACHED);
      return rc;
    }
    os_file = sock_alloc_file(sock, flags, NULL);
    if( IS_ERR(os_file) ) {
      LOG_E(ci_log("%s: ERROR: sock_alloc_file failed (%ld)",
                   __FUNCTION__, PTR_ERR(os_file)));
      sock_release(sock);
      tcp_helper_endpoint_clear_aflags(ep, OO_THR_EP_AFLAG_ATTACHED);
      return PTR_ERR(os_file);
    }
    rc = efab_attach_os_socket(ep, os_file);
    if( rc < 0 ) {
      LOG_E(ci_log("%s: ERROR: efab_attach_os_socket failed (%d)",
                   __FUNCTION__, rc));
      /* NB. efab_attach_os_socket() consumes [os_file] even on error. */
      tcp_helper_endpoint_clear_aflags(ep, OO_THR_EP_AFLAG_ATTACHED);
      return rc;
    }
    wo->sock.domain = op->domain;
    wo->sock.ino = ep->os_socket->file->f_dentry->d_inode->i_ino;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
    wo->sock.uid = ep->os_socket->file->f_dentry->d_inode->i_uid;
#else
    wo->sock.uid = __kuid_val(ep->os_socket->file->f_dentry->d_inode->i_uid);
#endif
  }

  /* Create a new file descriptor to attach the stack to. */
  ci_assert((wo->waitable.state & CI_TCP_STATE_TCP) ||
            wo->waitable.state == CI_TCP_STATE_UDP);
  rc = oo_create_fd(ep, flags,
                    (wo->waitable.state & CI_TCP_STATE_TCP) ?
                    CI_PRIV_TYPE_TCP_EP : CI_PRIV_TYPE_UDP_EP);
  if( rc < 0 ) {
    ci_irqlock_state_t lock_flags;
    struct oo_file_ref* os_socket;
    ci_irqlock_lock(&ep->thr->lock, &lock_flags);
    os_socket = ep->os_socket;
    ep->os_socket = NULL;
    ci_irqlock_unlock(&ep->thr->lock, &lock_flags);
    if( os_socket != NULL )
      oo_file_ref_drop(os_socket);
    tcp_helper_endpoint_clear_aflags(ep, OO_THR_EP_AFLAG_ATTACHED);
    return rc;
  }

  op->fd = rc;
#ifdef SOCK_NONBLOCK
  if( op->type & SOCK_NONBLOCK )
    ci_bit_mask_set(&wo->waitable.sb_aflags, CI_SB_AFLAG_O_NONBLOCK);
#endif

  /* Re-read the OS socket buffer size settings.  This ensures we'll use
   * up-to-date values for this new socket.
   */
  efab_get_os_settings(&NI_OPTS_TRS(trs));
  return 0;
}
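
The error path at the end of example #9 uses another recurring shape: the shared pointer (ep->os_socket) is detached under the irqlock, and the reference is dropped only after unlocking, because oo_file_ref_drop() may release the underlying file. A sketch of that detach-then-drop idiom (hypothetical types):

#include <pthread.h>
#include <stddef.h>

struct file_ref;
void file_ref_drop(struct file_ref *ref);  /* may block/free: call unlocked */

struct endpoint {
    pthread_mutex_t lock;
    struct file_ref *os_socket;
};

/* Detach the socket reference under the lock; drop it outside. */
static void detach_os_socket(struct endpoint *ep)
{
    struct file_ref *sock;

    pthread_mutex_lock(&ep->lock);
    sock = ep->os_socket;
    ep->os_socket = NULL;
    pthread_mutex_unlock(&ep->lock);

    if (sock != NULL)
        file_ref_drop(sock);
}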
Code Example #10
File: tcp_connect.c  Project: davenso/openonload
/* c_ni is assumed to be locked on entrance and is always unlocked on
 * exit. */
int ci_tcp_connect_lo_toconn(ci_netif *c_ni, oo_sp c_id, ci_uint32 dst,
                             ci_netif *l_ni, oo_sp l_id)
{
  ci_tcp_state *ts;
  ci_tcp_socket_listen *tls, *alien_tls;
  citp_waitable_obj *wo;
  citp_waitable *w;
  int rc;

  ci_assert(ci_netif_is_locked(c_ni));
  ci_assert(OO_SP_NOT_NULL(c_id));
  ci_assert(OO_SP_NOT_NULL(l_id));

  LOG_TC(log("%s: connect %d:%d to %d:%d", __FUNCTION__,
             c_ni->state->stack_id, OO_SP_TO_INT(c_id),
             l_ni->state->stack_id, OO_SP_TO_INT(l_id)));

  alien_tls = SP_TO_TCP_LISTEN(l_ni, l_id);
  if( (int)ci_tcp_acceptq_n(alien_tls) >= alien_tls->acceptq_max ) {
    ci_netif_unlock(c_ni);
    return -EBUSY;
  }

  /* In c_ni, create shadow listening socket tls (copy l_id) */
  ts = ci_tcp_get_state_buf(c_ni);
  if( ts == NULL ) {
    ci_netif_unlock(c_ni);
    LOG_E(ci_log("%s: [%d] out of socket buffers", __FUNCTION__, NI_ID(c_ni)));
    return -ENOMEM;
  }

  /* init common tcp fields */
  ts->s.so = alien_tls->s.so;
  ts->s.cp.ip_ttl = alien_tls->s.cp.ip_ttl;
  S_TCP_HDR(&ts->s)->tcp_source_be16 =
      S_TCP_HDR(&alien_tls->s)->tcp_source_be16;
  ts->s.domain = alien_tls->s.domain;
  ts->c = alien_tls->c;
  ts->c.tcp_defer_accept = OO_TCP_DEFER_ACCEPT_OFF;

  /* make sure nobody will ever connect to our "shadow" socket
   * except us */
  ci_bit_set(&ts->s.b.sb_aflags, CI_SB_AFLAG_ORPHAN_BIT);

  ci_tcp_set_slow_state(c_ni, ts, CI_TCP_LISTEN);
  tls = SOCK_TO_TCP_LISTEN(&ts->s);
  /* no timer: */
  tls->s.s_flags = alien_tls->s.s_flags | CI_SOCK_FLAG_BOUND_ALIEN;

  tls->acceptq_max = 1;
  rc = ci_tcp_listen_init(c_ni, tls);
  if( rc != 0 ) {
    citp_waitable_obj_free(c_ni, &tls->s.b);
    ci_netif_unlock(c_ni);  /* contract: c_ni is always unlocked on exit */
    return rc;
  }

  /* Connect c_id to tls */
  ts = SP_TO_TCP(c_ni, c_id);
  rc = ci_tcp_connect_lo_samestack(c_ni, ts, tls->s.b.bufid);

  /* Accept as from tls */
  if( !ci_tcp_acceptq_not_empty(tls) ) {
    /* it is possible, for example, if ci_tcp_listenq_try_promote() failed
     * because there are no endpoints */
    ci_tcp_listenq_drop_all(c_ni, tls);
    citp_waitable_obj_free(c_ni, &tls->s.b);
    ci_netif_unlock(c_ni);
    return -EBUSY;
  }
  w = ci_tcp_acceptq_get(c_ni, tls);
  ci_assert(w);
  LOG_TV(ci_log("%s: %d:%d to %d:%d shadow %d:%d accepted %d:%d",
                __FUNCTION__,
                c_ni->state->stack_id, OO_SP_TO_INT(c_id),
                l_ni->state->stack_id, OO_SP_TO_INT(l_id),
                c_ni->state->stack_id, tls->s.b.bufid,
                c_ni->state->stack_id, w->bufid));

  ci_assert(w->state & CI_TCP_STATE_TCP);
  ci_assert(w->state != CI_TCP_LISTEN);

  /* Destroy tls.
   * NB: nobody could possibly connect to it, so no need to do proper
   * shutdown.
   */
  ci_assert_equal(ci_tcp_acceptq_n(tls), 0);
  ci_tcp_listenq_drop_all(c_ni, tls);
  citp_waitable_obj_free(c_ni, &tls->s.b);
  ci_netif_unlock(c_ni);

  /* Keep a port reference */
  {
    tcp_helper_endpoint_t *l_ep, *a_ep;
    struct oo_file_ref* os_sock_ref;
    ci_irqlock_state_t lock_flags;

    l_ep = ci_trs_ep_get(netif2tcp_helper_resource(l_ni), l_id);
    a_ep = ci_trs_ep_get(netif2tcp_helper_resource(c_ni), W_SP(w));
    ci_irqlock_lock(&l_ep->thr->lock, &lock_flags);
    os_sock_ref = l_ep->os_socket;
    ci_assert_equal(a_ep->os_port_keeper, NULL);
    if( os_sock_ref != NULL ) {
      os_sock_ref = oo_file_ref_add(os_sock_ref);
      os_sock_ref = oo_file_ref_xchg(&a_ep->os_port_keeper, os_sock_ref);
      ci_irqlock_unlock(&l_ep->thr->lock, &lock_flags);
      if( os_sock_ref != NULL )
        oo_file_ref_drop(os_sock_ref);
    }
    else {
      ci_irqlock_unlock(&l_ep->thr->lock, &lock_flags);
      goto cleanup;
    }
  }

  /* lock l_ni: Check that l_id is the same socket it used to be */
  /* create ref-sock in l_ni, put it into acc q */
  if( ci_netif_lock(l_ni) != 0 )
    goto cleanup;
  if( alien_tls->s.b.state != CI_TCP_LISTEN ||
      (alien_tls->s.b.sb_aflags & CI_SB_AFLAG_ORPHAN) ||
      S_TCP_HDR(&alien_tls->s)->tcp_source_be16 != TS_TCP(ts)->tcp_dest_be16 ||
      (alien_tls->s.pkt.ip.ip_saddr_be32 != INADDR_ANY &&
       alien_tls->s.pkt.ip.ip_saddr_be32 != ts->s.pkt.ip.ip_daddr_be32) ) {
    ci_netif_unlock(l_ni);
    goto cleanup;
  }

  ci_bit_mask_set(&w->sb_aflags,
                  CI_SB_AFLAG_TCP_IN_ACCEPTQ | CI_SB_AFLAG_ORPHAN);

  wo = citp_waitable_obj_alloc(l_ni);
  if( wo == NULL ) {
    ci_netif_unlock(l_ni);
    goto cleanup;
  }
  wo->waitable.state = CI_TCP_CLOSED;
  wo->waitable.sb_aflags |= CI_SB_AFLAG_MOVED_AWAY;
  wo->waitable.moved_to_stack_id = c_ni->state->stack_id;
  wo->waitable.moved_to_sock_id = W_SP(w);
  LOG_TC(log("%s: put to acceptq %d:%d referencing %d:%d", __func__,
             l_ni->state->stack_id, OO_SP_TO_INT(W_SP(&wo->waitable)),
             c_ni->state->stack_id, OO_SP_TO_INT(W_SP(w))));

  ci_tcp_acceptq_put(l_ni, alien_tls, &wo->waitable);
  citp_waitable_wake_not_in_poll(l_ni, &alien_tls->s.b, CI_SB_FLAG_WAKE_RX);
  ci_netif_unlock(l_ni);

  return rc;

cleanup:
  ci_assert(w->sb_aflags & CI_SB_AFLAG_ORPHAN);
  ci_bit_mask_clear(&w->sb_aflags,
                    CI_SB_AFLAG_TCP_IN_ACCEPTQ | CI_SB_AFLAG_ORPHAN);
  efab_tcp_helper_close_endpoint(netif2tcp_helper_resource(c_ni), w->bufid);
  /* We cannot guarantee the c_ni lock, so we can't call
   * ci_tcp_drop(c_ni, ts).  Instead we return an error; user level
   * will hand the socket over and close the ts endpoint. */
  return -EBUSY;
}
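
Example #10's header comment states its one locking contract: c_ni arrives locked and leaves unlocked on every path, success or failure. Functions that consume a lock this way are easiest to audit when each early return is preceded by exactly one unlock, as in this skeletal sketch (illustrative names):

#include <pthread.h>

struct netif { pthread_mutex_t lock; };

/* Called with n->lock held; unlocks on every exit path, like
 * ci_tcp_connect_lo_toconn() does with c_ni. */
static int work_consuming_lock(struct netif *n, int arg)
{
    if (arg < 0) {                   /* early failure: unlock, then return */
        pthread_mutex_unlock(&n->lock);
        return -1;
    }

    /* ... work under the lock ... */

    pthread_mutex_unlock(&n->lock);  /* normal exit */
    return 0;
}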