Example #1
/* Function to insert the current process's MM into the hash-table --
 * necessary to trampoline syscalls back into the user library.  The
 * trampoline_entry parameter is the address in the library we'll
 * trampoline to.
 *
 * Returns zero on success, or a negative error code on failure.
 */
int
efab_linux_trampoline_register (ci_private_t *priv, void *arg)
{
  const ci_tramp_reg_args_t *args = arg;
  int rc = 0;
  struct mm_hash *p;

  TRAMP_DEBUG ("Register entry-point 0x%"CI_PRIx64"(0x%"CI_PRIx64") for mm %p (pid %d)",
               args->trampoline_entry.ptr, args->trampoline_exclude.ptr, current->mm,
               current->pid);

  write_lock (&oo_mm_tbl_lock);

  p = oo_mm_tbl_lookup(current->mm);
  /* The mm should already be in the hash table, added when the client
   * first mmapped.  If so, update its trampoline entry-points below.
   */
  if (!p) {
    /* This means current mm is not in the hash, implying that client
     * hasn't mmapped anything yet.  Fail the request since we have no
     * way of cleaning up.
     */
    ci_log("Unexpected trampoline registration with no maps");
    rc = -ENOENT;
    goto exit;
  }
  ci_assert (p);
  ci_assert (p->mm == current->mm);
  p->trampoline_entry = args->trampoline_entry;
  p->trampoline_exclude = args->trampoline_exclude;
  p->trampoline_toc = args->trampoline_toc;
  p->trampoline_user_fixup = args->trampoline_user_fixup;
  CI_DEBUG(p->trampoline_ul_fail = args->trampoline_ul_fail;)

 exit:
  write_unlock (&oo_mm_tbl_lock);
  return rc;
}
Example #2
/* Free a thc.
 *
 * You must hold the thc_mutex before calling this function.
 */
static void thc_cluster_free(tcp_helper_cluster_t* thc)
{
  int i;
  tcp_helper_cluster_t *thc_walk, *thc_prev;

  thc_uninstall_tproxy(thc);

  /* Free up resources within the thc */
  for( i = 0; i < CI_CFG_MAX_REGISTER_INTERFACES; ++i )
    if( thc->thc_vi_set[i] != NULL )
      efrm_vi_set_release(thc->thc_vi_set[i]);

  ci_assert(ci_dllist_is_empty(&thc->thc_tlos));

  /* Remove from the thc_head list */
  thc_walk = thc_head;
  thc_prev = NULL;
  while( thc_walk != NULL ) {
    if( thc_walk == thc ) {
      if( thc_walk == thc_head ) {
        ci_assert_equal(thc_prev, NULL);
        thc_head = thc_walk->thc_next;
      }
      else {
        thc_prev->thc_next = thc_walk->thc_next;
      }
      kfree(thc_walk);
      return;
    }
    thc_prev = thc_walk;
    thc_walk = thc_walk->thc_next;
  }
  ci_assert(0);
}
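The list removal above is the textbook singly-linked unlink: walk with a trailing prev pointer, special-casing the head because nothing points at it. A generic self-contained sketch of the same shape (the node type and names here are illustrative, not Onload's):

#include <stdio.h>
#include <stdlib.h>

struct node {
  int value;
  struct node* next;
};

static struct node* head;

/* Unlink [n] from the global list, mirroring the walk in
 * thc_cluster_free(): prev trails walk by one node, and removing
 * the head is handled separately. */
static void list_remove(struct node* n)
{
  struct node *walk = head, *prev = NULL;

  while( walk != NULL ) {
    if( walk == n ) {
      if( walk == head )
        head = walk->next;
      else
        prev->next = walk->next;
      free(walk);
      return;
    }
    prev = walk;
    walk = walk->next;
  }
}

int main(void)
{
  int i;
  for( i = 0; i < 3; i++ ) {      /* build the list 2 -> 1 -> 0 */
    struct node* n = malloc(sizeof(*n));
    n->value = i;
    n->next = head;
    head = n;
  }
  list_remove(head->next);        /* drop the middle node (1) */
  for( ; head != NULL; head = head->next )
    printf("%d\n", head->value);  /* prints 2 then 0 */
  return 0;
}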
Example #3
int ci_parse_eth_addr(void* eth_mac_addr, const char* str, char sep)
{
  unsigned b1, b2, b3, b4, b5, b6;
  unsigned char* p;

  ci_assert(eth_mac_addr);
  ci_assert(str);

  if( strlen(str) < 17 )  return -1;

  if( sep ) {
    char fmt[] = "%02x:%02x:%02x:%02x:%02x:%02x";
    fmt[4] = fmt[9] = fmt[14] = fmt[19] = fmt[24] = sep;
    if( ci_sscanf(str, fmt, &b1, &b2, &b3, &b4, &b5, &b6) != 6 )
      return -1;
  }
  else {
    if( ci_sscanf(str, "%02x%c%02x%c%02x%c%02x%c%02x%c%02x",
	       &b1, &sep, &b2, &sep, &b3, &sep,
	       &b4, &sep, &b5, &sep, &b6) != 11 )
      return -1;
  }

  p = (unsigned char*) eth_mac_addr;
  p[0] = (unsigned char) b1;
  p[1] = (unsigned char) b2;
  p[2] = (unsigned char) b3;
  p[3] = (unsigned char) b4;
  p[4] = (unsigned char) b5;
  p[5] = (unsigned char) b6;

  return 0;
}
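The separator-patching trick above (overwriting the ':' bytes of a fixed-layout scanf format at runtime) works with plain sscanf too. A minimal self-contained sketch of the same technique, independent of the ci_ wrappers; parse_mac and its caller are illustrative, not part of OpenOnload:

#include <stdio.h>

/* Patch the separator bytes of a fixed-layout scanf format at
 * runtime, then parse six hex octets. */
static int parse_mac(unsigned char out[6], const char* str, char sep)
{
  unsigned b[6];
  char fmt[] = "%02x:%02x:%02x:%02x:%02x:%02x";
  int i;

  /* The ':' placeholders sit at fixed offsets 4, 9, 14, 19, 24. */
  fmt[4] = fmt[9] = fmt[14] = fmt[19] = fmt[24] = sep;
  if( sscanf(str, fmt, &b[0], &b[1], &b[2], &b[3], &b[4], &b[5]) != 6 )
    return -1;
  for( i = 0; i < 6; i++ )
    out[i] = (unsigned char) b[i];
  return 0;
}

int main(void)
{
  unsigned char mac[6];
  if( parse_mac(mac, "00-0f-53-01-02-03", '-') == 0 )
    printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
           mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
  return 0;
}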
Example #4
File: ipid.c Project: ido/openonload
extern int efab_ipid_alloc(efab_ipid_cb_t* ipid)
{
  int i;
  int rv;
  ci_irqlock_state_t lock_flags;

  ci_assert( ipid->init == EFAB_IPID_INIT );
  ci_irqlock_lock( &ipid->lock, &lock_flags );

  /* go find an unused block */
  for( i = 0; i < CI_IPID_BLOCK_COUNT; i++ ) {
    if( !ipid->range[i] ) {
      ipid->range[i]++;
      rv = CI_IPID_MIN + (i << CI_IPID_BLOCK_SHIFT);
      ci_assert((rv >= CI_IPID_MIN) &&
                (rv <= CI_IPID_MAX - CI_IPID_BLOCK_LENGTH + 1));
      goto alloc_exit;
    } else {
      ci_assert( ipid->range[i] == 1 );
    }
  }
  /* !!Out of blocks!! */
  rv = -ENOMEM;

 alloc_exit:
  ci_irqlock_unlock( &ipid->lock, &lock_flags );
  return rv;
}
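The block arithmetic above is easier to see with concrete numbers: block i owns the contiguous ID range starting at CI_IPID_MIN + (i << CI_IPID_BLOCK_SHIFT). A standalone sketch with made-up stand-ins for the CI_IPID_* constants (the real values live in OpenOnload's headers):

#include <stdio.h>

/* Hypothetical stand-ins for the CI_IPID_* constants above. */
#define IPID_MIN          0x8000   /* first allocatable IP ID   */
#define IPID_BLOCK_SHIFT  10       /* log2 of the block length  */
#define IPID_BLOCK_LENGTH (1 << IPID_BLOCK_SHIFT)
#define IPID_BLOCK_COUNT  4        /* keep the demo output short */

int main(void)
{
  int i;
  /* Each block i owns the contiguous ID range
   * [MIN + (i << SHIFT), MIN + (i << SHIFT) + LENGTH - 1]. */
  for( i = 0; i < IPID_BLOCK_COUNT; i++ ) {
    int first = IPID_MIN + (i << IPID_BLOCK_SHIFT);
    printf("block %d: 0x%04x .. 0x%04x\n",
           i, first, first + IPID_BLOCK_LENGTH - 1);
  }
  return 0;
}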
Example #5
/*! Mark a signal as pending.
**
** Should be called from a signal handler only.
**
** \param  signum   Signal number
** \param  info     Saved info for sa_sigaction handler
** \param  context  Saved context for sa_sigaction handler
** \param  our_info Our signal info
*/
ci_inline void citp_signal_set_pending(int signum, siginfo_t *info,
                                       void *context,
                                       citp_signal_info *our_info)
{
  int i;

  ci_assert(our_info->inside_lib);

  for( i = 0; i < OO_SIGNAL_MAX_PENDING; i++ ) {
    if( our_info->signals[i].signum )
      continue;
    if( ci_cas32_fail(&our_info->signals[i].signum, 0, signum) )
      continue;
    LOG_SIG(log("%s: signal %d pending", __FUNCTION__, signum));
    ci_assert(info);
    ci_assert(context);
    memcpy(&our_info->signals[i].saved_info, info, sizeof(siginfo_t));
    our_info->signals[i].saved_context = context;

    /* Hack: in case of SA_ONESHOT, make sure that we intercept
     * the signal.  At the end of citp_signal_run_app_handler,
     * we will reset the signal handler properly. */
    if( citp_signal_data[signum-1].flags & SA_ONESHOT )
      sigaction(signum, NULL, NULL);

    ci_atomic32_or(&our_info->aflags, OO_SIGNAL_FLAG_HAVE_PENDING);
    return;
  }

  log("%s: no empty slot to set pending signal %d", __FUNCTION__, signum);
}
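The slot-claim idiom above (scan for a zero slot, then compare-and-swap it from 0 to the new value, so two concurrent signal handlers can never claim the same slot) can be sketched with GCC's __atomic builtins. This shows the pattern only; ci_cas32_fail is Onload's own wrapper and may differ in detail:

#include <stdio.h>
#include <stdint.h>

#define MAX_PENDING 4

static int32_t slots[MAX_PENDING];   /* 0 means "free" */

/* Claim a free slot for [signum]; returns the slot index, or -1 if
 * full.  The CAS makes the claim atomic: if another thread won the
 * race, the CAS fails and we simply move on to the next slot. */
static int claim_slot(int32_t signum)
{
  int i;
  for( i = 0; i < MAX_PENDING; i++ ) {
    int32_t expected = 0;
    if( slots[i] != 0 )
      continue;
    if( __atomic_compare_exchange_n(&slots[i], &expected, signum, 0,
                                    __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE) )
      return i;
  }
  return -1;
}

int main(void)
{
  printf("SIGINT  -> slot %d\n", claim_slot(2));
  printf("SIGTERM -> slot %d\n", claim_slot(15));
  return 0;
}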
Example #6
int
ci_id_pool_ctor(ci_id_pool_t* idp, int max_num_ids, int init_size)
{
  int fifo_size;

  ci_assert(idp);
  ci_assert(init_size >= 0);
  ci_assert(max_num_ids == -1 || init_size <= max_num_ids);

  if( init_size == 0 )  fifo_size = 5;
  else                  fifo_size = ci_log2_le(init_size) + 1;
  fifo_size = ci_pow2(fifo_size);

  idp->next_id = 0;
  idp->max_num_ids = max_num_ids;

  /* Want [free_ids] to be 2^x in size, as more efficient that way. */
  idp->free_ids.fifo = (int*) ci_vmalloc(fifo_size * sizeof(int));
  ci_fifo2_init(&idp->free_ids, fifo_size);

  while( init_size-- ) {
    ci_assert(!ci_fifo2_is_full(&idp->free_ids));
    ci_fifo2_put(&idp->free_ids, (int) idp->next_id++);
  }

  return 0;
}
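The sizing above rounds init_size up past the next power of two: ci_log2_le(n) is floor(log2(n)), so ci_pow2(ci_log2_le(n) + 1) is always strictly greater than n, which suits a power-of-two ring indexed with a mask. A standalone sketch of the same computation, with a plain-C stand-in for ci_log2_le:

#include <stdio.h>

/* floor(log2(n)) for n >= 1 -- stand-in for ci_log2_le() */
static int log2_le(unsigned n)
{
  int l = 0;
  while( n >>= 1 )
    ++l;
  return l;
}

int main(void)
{
  unsigned sizes[] = { 1, 5, 8, 100, 1000 };
  unsigned i;
  for( i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++ ) {
    unsigned init_size = sizes[i];
    /* 2^(log2_le(n) + 1) is strictly greater than n, so the fifo
     * always has room for the initial ids plus headroom. */
    unsigned fifo_size = 1u << (log2_le(init_size) + 1);
    printf("init_size=%4u -> fifo_size=%4u\n", init_size, fifo_size);
  }
  return 0;
}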
Example #7
static void
__ci_netif_filter_remove(ci_netif* ni, unsigned hash1,
                         unsigned hash2, int hops, unsigned last_tbl_i)
{
  ci_netif_filter_table* tbl = ni->filter_table;
  ci_netif_filter_table_entry* entry;
  unsigned tbl_i;
  int i;

  tbl_i = hash1;
  for( i = 0; i < hops; ++i ) {
    entry = &tbl->table[tbl_i];
    ci_assert(entry->id != EMPTY);
    ci_assert(entry->route_count > 0);
    if( --entry->route_count == 0 && entry->id == TOMBSTONE ) {
      CITP_STATS_NETIF(--ni->state->stats.table_n_slots);
      entry->id = EMPTY;
    }
    tbl_i = (tbl_i + hash2) & tbl->table_size_mask;
  }
  ci_assert(tbl_i == last_tbl_i);

  CITP_STATS_NETIF(--ni->state->stats.table_n_entries);
  entry = &tbl->table[tbl_i];
  if( entry->route_count == 0 ) {
    CITP_STATS_NETIF(--ni->state->stats.table_n_slots);
    entry->id = EMPTY;
  }
  else {
    entry->id = TOMBSTONE;
  }
}
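Removal retraces exactly the probe sequence insertion used: start at hash1 and advance by hash2 around a power-of-two table. A self-contained sketch of that walk; the table size and hash values are made up, and hash2 is odd so the probe visits every slot before repeating:

#include <stdio.h>

#define TABLE_SIZE 8                     /* must be a power of two */
#define MASK       (TABLE_SIZE - 1)

int main(void)
{
  unsigned hash1 = 3, hash2 = 5;         /* hash2 odd => full cycle */
  unsigned tbl_i = hash1;
  int hops;

  /* Same stepping as __ci_netif_filter_remove()'s walk: each hop
   * moves hash2 slots on, wrapping with the mask. */
  for( hops = 0; hops < TABLE_SIZE; ++hops ) {
    printf("hop %d -> slot %u\n", hops, tbl_i);
    tbl_i = (tbl_i + hash2) & MASK;
  }
  return 0;
}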
Example #8
static int
efab_tcp_drop_from_acceptq(ci_private_t *priv, void *arg)
{
  struct oo_op_tcp_drop_from_acceptq *carg = arg;
  tcp_helper_resource_t *thr;
  tcp_helper_endpoint_t *ep;
  citp_waitable *w;
  ci_tcp_state *ts;
  int rc = -EINVAL;

  /* find stack */
  rc = efab_thr_table_lookup(NULL, carg->stack_id,
                                 EFAB_THR_TABLE_LOOKUP_CHECK_USER |
                                 EFAB_THR_TABLE_LOOKUP_NO_UL,
                                 &thr);

  if( rc < 0 )
    return rc;
  ci_assert( thr->k_ref_count & TCP_HELPER_K_RC_NO_USERLAND );

  /* find endpoint and drop OS socket */
  ep = ci_trs_get_valid_ep(thr, carg->sock_id);
  if( ep == NULL )
    goto fail1;

  w = SP_TO_WAITABLE(&thr->netif, carg->sock_id);
  if( !(w->state & CI_TCP_STATE_TCP) || w->state == CI_TCP_LISTEN )
    goto fail2;
  ts = SP_TO_TCP(&thr->netif, carg->sock_id);
  ci_assert(ep->os_port_keeper);
  ci_assert_equal(ep->os_socket, NULL);

  LOG_TV(ci_log("%s: send reset to non-accepted connection", __FUNCTION__));

  /* copy from ci_tcp_listen_shutdown_queues() */
  ci_assert(ts->s.b.sb_aflags & CI_SB_AFLAG_TCP_IN_ACCEPTQ);
  rc = ci_netif_lock(&thr->netif);
  if( rc != 0 ) {
    ci_assert_equal(rc, -EINTR);
    rc = -ERESTARTSYS;
    goto fail2;
  }
  ci_bit_clear(&ts->s.b.sb_aflags, CI_SB_AFLAG_TCP_IN_ACCEPTQ_BIT);
  /* We have no way to close this connection from the other side:
   * there was no RST from peer. */
  ci_assert_nequal(ts->s.b.state, CI_TCP_CLOSED);
  ci_assert_nequal(ts->s.b.state, CI_TCP_TIME_WAIT);
  ci_tcp_send_rst(&thr->netif, ts);
  ci_tcp_drop(&thr->netif, ts, ECONNRESET);
  ci_assert_equal(ep->os_port_keeper, NULL);
  ci_netif_unlock(&thr->netif);
  efab_tcp_helper_k_ref_count_dec(thr, 1);
  return 0;

fail1:
  efab_thr_release(thr);
fail2:
  ci_log("%s: inconsistent ep %d:%d", __func__, carg->stack_id, carg->sock_id);
  return rc;
}
Example #9
int __ci_udp_shutdown(ci_netif* netif, ci_udp_state* us, int how)
{
    ci_assert(netif);
    ci_assert(us);

    /* On Windows you can shutdown socket even if it is not connected */
    if( udp_raddr_be32(us) == 0 )
        return -ENOTCONN;
    /* Maybe ESHUTDOWN is suitable, but Linux returns EPIPE */
    switch( how ) {
    case SHUT_RD:
        us->s.rx_errno |= CI_SHUT_RD;
        break;
    case SHUT_WR:
        us->s.rx_errno |= CI_SHUT_WR;
        us->s.tx_errno = EPIPE;
        break;
    case SHUT_RDWR:
        us->s.rx_errno |= (CI_SHUT_RD | CI_SHUT_WR);
        us->s.tx_errno = EPIPE;
        ci_assert(UDP_IS_SHUT_RDWR(us));
        break;
    default:
        ci_fail(("'how' parameter of shutdown() must be verified earlier"));
        return -EINVAL;
    }
    /* shutdown() must not disconnect */
    return 0;
}
Example #10
static int ci_udp_set_filters(citp_socket* ep, ci_udp_state* us)
{
    int rc;

    ci_assert(ep);
    ci_assert(us);

    if( udp_lport_be16(us) == 0 )
        return 0;

    rc = ci_tcp_ep_set_filters(ep->netif, S_SP(us), us->s.cp.so_bindtodevice,
                               OO_SP_NULL);
    if( rc == -EFILTERSSOME ) {
        if( CITP_OPTS.no_fail )
            rc = 0;
        else {
            ci_tcp_ep_clear_filters(ep->netif, S_SP(us), 0);
            rc = -ENOBUFS;
        }
    }
    if( rc < 0 ) {
        LOG_UC(log(FNS_FMT "ci_tcp_ep_set_filters failed (%d)",
                   FNS_PRI_ARGS(ep->netif, ep->s), -rc));
        CI_SET_ERROR(rc, -rc);
        return rc;
    }
    UDP_SET_FLAG(us, CI_UDPF_FILTERED);
    return 0;
}
Example #11
File: socket.c Project: ido/openonload
void ci_sock_cmn_timestamp_q_enqueue(ci_netif* ni, ci_sock_cmn* s,
                                     ci_ip_pkt_fmt* pkt)
{
  ci_ip_pkt_queue* qu = &s->timestamp_q;
  oo_pkt_p prev_head = qu->head;

  /* This part is effectively ci_ip_queue_enqueue(ni, &s->timestamp_q, p);
   * but inlined to allow using tsq_next field 
   */
  pkt->tsq_next = OO_PP_NULL;
  if( ci_ip_queue_is_empty(qu) ) {
    ci_assert(OO_PP_IS_NULL(qu->head));
    qu->head = OO_PKT_P(pkt);
  }
  else {
    ci_assert(OO_PP_NOT_NULL(qu->head));
    /* This assumes the netif lock is held, so use
       ci_ip_queue_enqueue_nnl() if it's not */
    PKT(ni, qu->tail)->tsq_next = OO_PKT_P(pkt);
  }
  qu->tail = OO_PKT_P(pkt);
  qu->num++;


  if( OO_PP_IS_NULL(prev_head) ) {
    ci_assert(OO_PP_IS_NULL(s->timestamp_q_extract));
    s->timestamp_q_extract = qu->head;
  } 
  else {
    ci_sock_cmn_timestamp_q_reap(ni, s);
  }

  /* Tells post-poll loop to put socket on the [reap_list]. */
  s->b.sb_flags |= CI_SB_FLAG_RX_DELIVERED;
}
Example #12
void citp_waitable_obj_free(ci_netif* ni, citp_waitable* w)
{
  ci_assert(ci_netif_is_locked(ni));

#ifdef __KERNEL__
  {
    /* Avoid racing with tcp_helper_do_non_atomic(). */
    tcp_helper_endpoint_t* ep = ci_netif_get_valid_ep(ni, w->bufid);
    unsigned ep_aflags;
  again:
    if( (ep_aflags = ep->ep_aflags) & OO_THR_EP_AFLAG_NON_ATOMIC ) {
      ci_assert(!(ep_aflags & OO_THR_EP_AFLAG_NEED_FREE));
      if( ci_cas32_fail(&ep->ep_aflags, ep_aflags,
                        ep_aflags | OO_THR_EP_AFLAG_NEED_FREE) )
        goto again;
      return;
    }
    ci_rmb();
  }
#endif

  __citp_waitable_obj_free(ni, w);
  w->wt_next = ni->state->free_eps_head;
  ni->state->free_eps_head = W_SP(w);
  /* Must be last, as may result in stack going away. */
  ci_drop_orphan(ni);
}
Example #13
/* Should be called when netif is locked */
static int oo_pipe_init(ci_netif* ni, struct oo_pipe* p)
{
  ci_assert(ni);
  ci_assert(p);

  /* init waitable */
  citp_waitable_reinit(ni, &p->b);

  p->b.state = CI_TCP_STATE_PIPE;

  p->bytes_added = 0;
  p->bytes_removed = 0;

  p->aflags = 0;

  oo_pipe_buf_clear_state(ni, p);

  p->bufs_num = 0;

  /* We add an extra buffer to ensure that the pipe can always hold at
   * least pipe_size bytes: the buffer under read_ptr may be blocked, so
   * it cannot count towards the usable capacity. */
  p->bufs_max = OO_PIPE_SIZE_TO_BUFS(CITP_OPTS.pipe_size) + 1;

  return 0;
}
Example #14
void ci_iarray_mode(const int* start, const int* end, int* mode_out)
{
  int current_v, mode, current_n, mode_n;
  const int* i;

  ci_iarray_assert_valid(start, end);
  ci_assert(end - start > 0);
  ci_assert(mode_out);

  current_v = mode = *start;
  current_n = mode_n = 1;

  for( i = start + 1; i != end; ++i ) {
    if( *i != current_v ) {
      if( current_n > mode_n ) {
        mode_n = current_n;
        mode = current_v;
      }
      current_v = *i;
      current_n = 0;
    }
    ++current_n;
  }
  if( current_n > mode_n ) {
    mode_n = current_n;
    mode = current_v;
  }

  *mode_out = mode;
}
Example #15
void ci_ip_send_pkt_lookup(ci_netif* ni,
                           const struct oo_sock_cplane* sock_cp_opt,
                           ci_ip_pkt_fmt* pkt,
                           ci_ip_cached_hdrs* ipcache)
{
  ci_ip4_hdr* pkt_ip = oo_tx_ip_hdr(pkt);
  struct oo_sock_cplane sock_cp;

  ci_assert(pkt_ip->ip_saddr_be32 != 0);
  ci_assert(pkt_ip->ip_daddr_be32 != 0);

  if( sock_cp_opt != NULL )
    sock_cp = *sock_cp_opt;
  else
    oo_sock_cplane_init(&sock_cp);
  ci_ip_cache_init(ipcache);
  sock_cp.ip_laddr_be32 = pkt_ip->ip_saddr_be32;
  ipcache->ip.ip_daddr_be32 = pkt_ip->ip_daddr_be32;

  switch( pkt_ip->ip_protocol ) {
  case IPPROTO_UDP:
  case IPPROTO_TCP:
    sock_cp.lport_be16 = TX_PKT_SPORT_BE16(pkt);
    ipcache->dport_be16 = TX_PKT_DPORT_BE16(pkt);
    break;
  default:
    sock_cp.lport_be16 = 0;
    ipcache->dport_be16 = 0;
    break;
  }

  cicp_user_retrieve(ni, ipcache, &sock_cp);
}
Example #16
void __citp_fdtable_busy_clear_slow(unsigned fd, citp_fdinfo_p new_fdip,
				    int fdt_locked)
{
  volatile citp_fdinfo_p* p_fdip = &citp_fdtable.table[fd].fdip;
  citp_fdinfo_p fdip, next;
  citp_fdtable_waiter* waiter;

  ci_assert(fd < citp_fdtable.inited_count);

 again:
  fdip = *p_fdip;
  ci_assert(fdip_is_busy(fdip));
  waiter = fdip_to_waiter(fdip);
  ci_assert(waiter);
  ci_assert(fdip_is_busy(waiter->next));
  if( waiter->next == fdip_busy )  next = new_fdip;
  else                             next = waiter->next;
  if( fdip_cas_fail(p_fdip, fdip, next) )  goto again;

  oo_rwlock_cond_lock(&waiter->cond);
  oo_rwlock_cond_broadcast(&waiter->cond);
  oo_rwlock_cond_unlock(&waiter->cond);

  if( next != new_fdip )  goto again;
}
Example #17
/* unpick the ci_ip_timer structure to actually do the callback */ 
static void ci_ip_timer_docallback(ci_netif *netif, ci_ip_timer* ts)
{
  ci_assert( TIME_LE(ts->time, ci_ip_time_now(netif)) );
  ci_assert( ts->time == IPTIMER_STATE(netif)->sched_ticks );

  switch(ts->fn){
  case CI_IP_TIMER_TCP_RTO:
    CHECK_TS(netif, SP_TO_TCP(netif, ts->param1));
    ci_tcp_timeout_rto(netif, SP_TO_TCP(netif, ts->param1));
    break;
  case CI_IP_TIMER_TCP_DELACK:
    CHECK_TS(netif, SP_TO_TCP(netif, ts->param1));
    ci_tcp_timeout_delack(netif, SP_TO_TCP(netif, ts->param1));
    break;
  case CI_IP_TIMER_TCP_ZWIN:
    CHECK_TS(netif, SP_TO_TCP(netif, ts->param1));
    ci_tcp_timeout_zwin(netif, SP_TO_TCP(netif, ts->param1));
    break;
  case CI_IP_TIMER_TCP_KALIVE:
    CHECK_TS(netif, SP_TO_TCP(netif, ts->param1));
    ci_tcp_timeout_kalive(netif, SP_TO_TCP(netif, ts->param1));
    break;
  case CI_IP_TIMER_TCP_LISTEN:
    ci_tcp_timeout_listen(netif, SP_TO_TCP_LISTEN(netif, ts->param1));    
    break;
  case CI_IP_TIMER_TCP_CORK:
    ci_tcp_timeout_cork(netif, SP_TO_TCP(netif, ts->param1));
    break;
  case CI_IP_TIMER_NETIF_TIMEOUT:
    ci_netif_timeout_state(netif);
    break;
  case CI_IP_TIMER_PMTU_DISCOVER:
    ci_pmtu_timeout_pmtu(netif, SP_TO_TCP(netif, ts->param1));
    break;
#if CI_CFG_TCP_SOCK_STATS
  case CI_IP_TIMER_TCP_STATS:
    ci_tcp_stats_action(netif, SP_TO_TCP(netif, ts->param1),
                        CI_IP_STATS_FLUSH,
                        CI_IP_STATS_OUTPUT_NONE, NULL, NULL );
    break;
#endif
#if CI_CFG_SUPPORT_STATS_COLLECTION
  case CI_IP_TIMER_NETIF_STATS:
    ci_netif_stats_action(netif, CI_IP_STATS_FLUSH,
                          CI_IP_STATS_OUTPUT_NONE, NULL, NULL );
    break;
#endif
#if CI_CFG_IP_TIMER_DEBUG
  case CI_IP_TIMER_DEBUG_HOOK:
    ci_ip_timer_debug_fn(netif, ts->link.addr, ts->param1);
    break;
#endif
  default:
    LOG_U(log( LPF "unknown timer callback code:%x param1:%d",
	       ts->fn, OO_SP_FMT(ts->param1)));    
    CI_DEBUG(ci_fail_stop_fn());
  }  
}
Example #18
static void citp_fdinfo_do_handover(citp_fdinfo* fdi, int fdt_locked)
{
  int rc;
  citp_fdinfo* epoll_fdi = NULL;
  int os_fd = fdi->fd;
#ifndef NDEBUG
  /* Yuk: does for UDP too. */
  volatile citp_fdinfo_p* p_fdip;
  p_fdip = &citp_fdtable.table[fdi->fd].fdip;
  ci_assert(fdip_is_busy(*p_fdip));
#endif


  Log_V(ci_log("%s: fd=%d nonb_switch=%d", __FUNCTION__, fdi->fd,
	       fdi->on_rcz.handover_nonb_switch));

  if( fdi->epoll_fd >= 0 ) {
    epoll_fdi = citp_epoll_fdi_from_member(fdi, fdt_locked);
    if( epoll_fdi->protocol->type == CITP_EPOLLB_FD )
      citp_epollb_on_handover(epoll_fdi, fdi);
  }
  rc = fdtable_fd_move(fdi->fd, OO_IOC_TCP_HANDOVER);
  if( rc == -EBUSY && fdi->epoll_fd >= 0 ) {
    ci_assert(fdi_to_sock_fdi(fdi)->sock.s->b.sb_aflags &
              CI_SB_AFLAG_MOVED_AWAY);
    /* If this is our epoll, we can do full handover: we manually add os
     * fd into the epoll set.
     * Fixme: ensure we are not in _other_ epoll sets */
    ci_bit_clear(&fdi_to_sock_fdi(fdi)->sock.s->b.sb_aflags,
                 CI_SB_AFLAG_MOVED_AWAY_IN_EPOLL_BIT);
    rc = fdtable_fd_move(fdi->fd, OO_IOC_FILE_MOVED);
  }
  if( rc != 0 ) {
    citp_fdinfo* new_fdi;
    if( ! fdt_locked ) CITP_FDTABLE_LOCK();
    new_fdi = citp_fdtable_probe_locked(fdi->fd, CI_TRUE, CI_TRUE);
    citp_fdinfo_release_ref(new_fdi, 1);
    if( ! fdt_locked ) CITP_FDTABLE_UNLOCK();
    ci_assert_equal(citp_fdinfo_get_type(new_fdi), CITP_PASSTHROUGH_FD);
    os_fd = fdi_to_alien_fdi(new_fdi)->os_socket;
  }
  if( fdi->on_rcz.handover_nonb_switch >= 0 ) {
    int on_off = !! fdi->on_rcz.handover_nonb_switch;
    int rc = ci_sys_ioctl(os_fd, FIONBIO, &on_off);
    if( rc < 0 )
      Log_E(ci_log("%s: ioctl failed on_off=%d", __FUNCTION__, on_off));
  }
  if( rc != 0 )
    goto exit;
  citp_fdtable_busy_clear(fdi->fd, fdip_passthru, fdt_locked);
exit:
  citp_fdinfo_get_ops(fdi)->dtor(fdi, fdt_locked);
  if( epoll_fdi != NULL && epoll_fdi->protocol->type == CITP_EPOLL_FD )
    citp_epoll_on_handover(epoll_fdi, fdi, fdt_locked);
  if( epoll_fdi != NULL )
    citp_fdinfo_release_ref(epoll_fdi, fdt_locked);
  citp_fdinfo_free(fdi);
}
Example #19
static void oo_bufpage_huge_free(struct oo_buffer_pages *p)
{
  ci_assert(p->shmid >= 0);
  ci_assert(current);

  put_page(p->pages[0]);
  efab_linux_sys_shmctl(p->shmid, IPC_RMID, NULL);
  oo_iobufset_kfree(p);
}
Example #20
void ci_udp_state_free(ci_netif* ni, ci_udp_state* us)
{
  ci_assert(ci_netif_is_locked(ni));
  ci_assert(us->s.b.state == CI_TCP_STATE_UDP);
  ci_assert(ci_ni_dllist_is_self_linked(ni, &us->s.b.post_poll_link));

  ci_udp_recv_q_drop(ni, &us->timestamp_q);

  citp_waitable_obj_free(ni, &us->s.b);
}
Example #21
citp_waitable_obj* citp_waitable_obj_alloc(ci_netif* netif)
{
  citp_waitable_obj* wo;

  ci_assert(netif);
  ci_assert(ci_netif_is_locked(netif));

  if( netif->state->deferred_free_eps_head != CI_ILL_END ) {
    ci_uint32 link;
    do
      link = netif->state->deferred_free_eps_head;
    while( ci_cas32_fail(&netif->state->deferred_free_eps_head,
                         link, CI_ILL_END));
    while( link != CI_ILL_END ) {
      citp_waitable* w = ID_TO_WAITABLE(netif, link);
      link = w->next_id;
      CI_DEBUG(w->next_id = CI_ILL_END);
      ci_assert_equal(w->state, CI_TCP_STATE_FREE);
      ci_assert(OO_SP_IS_NULL(w->wt_next));
      w->wt_next = netif->state->free_eps_head;
      netif->state->free_eps_head = W_SP(w);
    }
  }

  if( OO_SP_IS_NULL(netif->state->free_eps_head) ) {
    ci_tcp_helper_more_socks(netif);

    if( OO_SP_IS_NULL(netif->state->free_eps_head) )
      ci_netif_timeout_reap(netif);
  }

  if( OO_SP_IS_NULL(netif->state->free_eps_head) )
    return NULL;

  LOG_TV(ci_log("%s: allocating %d", __FUNCTION__,
                OO_SP_FMT(netif->state->free_eps_head)));

  ci_assert(IS_VALID_SOCK_P(netif, netif->state->free_eps_head));
#if !defined(__KERNEL__) && !defined (CI_HAVE_OS_NOPAGE)
  ci_netif_mmap_shmbuf(netif,
                       (netif->state->free_eps_head >> EP_BUF_BLOCKSHIFT) + 1);
#endif
  wo = SP_TO_WAITABLE_OBJ(netif, netif->state->free_eps_head);

  ci_assert(OO_SP_EQ(W_SP(&wo->waitable), netif->state->free_eps_head));
  ci_assert_equal(wo->waitable.state, CI_TCP_STATE_FREE);
  ci_assert_equal(wo->waitable.sb_aflags, (CI_SB_AFLAG_ORPHAN | CI_SB_AFLAG_NOT_READY));
  ci_assert_equal(wo->waitable.lock.wl_val, 0);

  netif->state->free_eps_head = wo->waitable.wt_next;
  CI_DEBUG(wo->waitable.wt_next = OO_SP_NULL);
  ci_assert_equal(wo->waitable.state, CI_TCP_STATE_FREE);

  return wo;
}
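The deferred-list drain at the top of this function is a common lock-free shape: compare-and-swap the shared head to the end sentinel so the whole chain is detached in one step, then walk the now-private chain without further atomics. A standalone sketch using GCC __atomic builtins; the node array and sentinel value are illustrative, not Onload's:

#include <stdio.h>
#include <stdint.h>

#define ILL_END  ((uint32_t) -1)   /* illustrative end-of-list marker */

static uint32_t next_id[3] = { 2, ILL_END, 1 };  /* toy links        */
static uint32_t deferred_head = 0;               /* chain: 0 -> 2 -> 1 */

int main(void)
{
  /* Detach the whole chain in one step: retry the CAS until we
   * manage to swap the live head for the sentinel. */
  uint32_t link = __atomic_load_n(&deferred_head, __ATOMIC_ACQUIRE);
  while( !__atomic_compare_exchange_n(&deferred_head, &link, ILL_END, 0,
                                      __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE) )
    ;  /* [link] is refreshed by the failed CAS; just retry */

  /* The chain is now private; walk it without further atomics. */
  while( link != ILL_END ) {
    printf("draining node %u\n", link);
    link = next_id[link];
  }
  return 0;
}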
Example #22
int
efab_ioctl_get_ep(ci_private_t* priv, oo_sp sockp,
                  tcp_helper_endpoint_t** ep_out)
{
  ci_assert(ep_out != NULL);
  if( priv->thr == NULL || ! IS_VALID_SOCK_P(&priv->thr->netif, sockp) )
    return -EINVAL;
  *ep_out = ci_trs_ep_get(priv->thr, sockp);
  ci_assert(*ep_out != NULL);
  return 0;
}
Example #23
void __citp_fdinfo_ref_count_zero(citp_fdinfo* fdi, int fdt_locked)
{
  Log_V(log("%s: fd=%d on_rcz=%d", __FUNCTION__, fdi->fd,
	    fdi->on_ref_count_zero));

  citp_fdinfo_assert_valid(fdi);
  ci_assert(oo_atomic_read(&fdi->ref_count) == 0);
  ci_assert_ge(fdi->fd, 0);
  ci_assert_lt(fdi->fd, citp_fdtable.inited_count);
  ci_assert_nequal(fdi_to_fdip(fdi), citp_fdtable.table[fdi->fd].fdip);

  switch( fdi->on_ref_count_zero ) {
  case FDI_ON_RCZ_CLOSE:
#if CI_CFG_FD_CACHING
    if( citp_fdinfo_get_ops(fdi)->cache(fdi) == 1 ) {
      if( ! fdt_locked && fdtable_strict() )  CITP_FDTABLE_LOCK();
      fdtable_swap(fdi->fd, fdip_closing, fdip_unknown,
                   fdt_locked | fdtable_strict());
      citp_fdinfo_get_ops(fdi)->dtor(fdi, fdt_locked | fdtable_strict());
      if( ! fdt_locked && fdtable_strict() )  CITP_FDTABLE_UNLOCK();
      citp_fdinfo_free(fdi);
      break;
    }
    else 
#endif
    {
      if( ! fdt_locked && fdtable_strict() )  CITP_FDTABLE_LOCK();
      ci_tcp_helper_close_no_trampoline(fdi->fd);
      /* The swap must occur after the close, otherwise another thread could
       * cause a probe of the old endpoint info, which is about be freed.
       */
      fdtable_swap(fdi->fd, fdip_closing, fdip_unknown,
		   fdt_locked | fdtable_strict());
      citp_fdinfo_get_ops(fdi)->dtor(fdi, fdt_locked | fdtable_strict());
      if( ! fdt_locked && fdtable_strict() )  CITP_FDTABLE_UNLOCK();
      citp_fdinfo_free(fdi);
      break;
    }
  case FDI_ON_RCZ_DUP2:
    dup2_complete(fdi, fdi_to_fdip(fdi), fdt_locked);
    break;
  case FDI_ON_RCZ_HANDOVER:
    citp_fdinfo_do_handover(fdi, fdt_locked);
    break;
  case FDI_ON_RCZ_MOVED:
    citp_fdinfo_get_ops(fdi)->dtor(fdi, fdt_locked);
    citp_fdinfo_free(fdi);
    break;
  default:
    CI_DEBUG(ci_log("%s: fd=%d on_ref_count_zero=%d", __FUNCTION__,
		    fdi->fd, fdi->on_ref_count_zero));
    ci_assert(0);
  }
}
Example #24
static int citp_pipe_send(citp_fdinfo* fdinfo,
                          const struct msghdr* msg, int flags)
{
  citp_pipe_fdi* epi = fdi_to_pipe_fdi(fdinfo);

  ci_assert_equal(flags, 0);
  ci_assert(msg);
  ci_assert(msg->msg_iov);

  return ci_pipe_write(epi->ni, epi->pipe, msg->msg_iov, msg->msg_iovlen);
}
Example #25
void citp_epinfo_init(citp_epinfo* epinfo, citp_protocol_impl* protocol)
{
  ci_assert(epinfo);
  ci_assert(protocol);

  epinfo->protocol = protocol;

  /* Start at zero.  It will be increased whenever an endpoint is inserted
  ** into the fdtable.
  */
  oo_atomic_set(&epinfo->ref_count, 0);
}
Example #26
void citp_fdtable_insert(citp_fdinfo* fdi, unsigned fd, int fdt_locked)
{
  ci_assert(fdi);
  ci_assert(fdi->protocol);
  ci_assert(citp_fdtable.inited_count > fd);
  ci_assert_ge(oo_atomic_read(&fdi->ref_count), 1);

  fdi->fd = fd;
  CI_DEBUG(fdi->on_ref_count_zero = FDI_ON_RCZ_NONE);
  fdi->is_special = 0;
  citp_fdtable_busy_clear(fd, fdi_to_fdip(fdi), fdt_locked);
}
Example #27
void citp_waitable_wake_not_in_poll(ci_netif* ni, citp_waitable* sb,
                                    unsigned what)
{
  ci_assert(what);
  ci_assert((what & ~(CI_SB_FLAG_WAKE_RX|CI_SB_FLAG_WAKE_TX)) == 0u);
  ci_assert(!ni->state->in_poll);
  ci_wmb();
  if( what & CI_SB_FLAG_WAKE_RX )
    ++sb->sleep_seq.rw.rx;
  if( what & CI_SB_FLAG_WAKE_TX )
    ++sb->sleep_seq.rw.tx;
  ci_mb();

#ifdef __KERNEL__
  /* Normally we put an object on a ready list in ci_netif_put_on_post_poll,
   * but in this case we don't go via there, so have to explicitly queue on
   * the ready list here.
   */
  ci_ni_dllist_remove(ni, &sb->ready_link);
  ci_ni_dllist_put(ni, &ni->state->ready_lists[sb->ready_list_id],
                   &sb->ready_link);

  if( what & sb->wake_request ) {
    sb->sb_flags |= what;
    citp_waitable_wakeup(ni, sb);
  }

  /* Wake the ready list too, if that has been requested. */
  if( ni->state->ready_list_flags[sb->ready_list_id] &
      CI_NI_READY_LIST_FLAG_WAKE )
    efab_tcp_helper_ready_list_wakeup(netif2tcp_helper_resource(ni),
                                      sb->ready_list_id);
#else
  if( what & sb->wake_request ) {
    sb->sb_flags |= what;
    ci_netif_put_on_post_poll(ni, sb);
    ef_eplock_holder_set_flag(&ni->state->lock, CI_EPLOCK_NETIF_NEED_WAKE);
  }
  else {
    /* Normally we put an object on a ready list in ci_netif_put_on_post_poll,
     * but in this case we don't go via there, so have to explicitly queue on
     * the ready list here.
     */
    ci_ni_dllist_remove(ni, &sb->ready_link);
    ci_ni_dllist_put(ni, &ni->state->ready_lists[sb->ready_list_id],
                     &sb->ready_link);

    if( ni->state->ready_list_flags[sb->ready_list_id] &
        CI_NI_READY_LIST_FLAG_WAKE )
      ef_eplock_holder_set_flag(&ni->state->lock, CI_EPLOCK_NETIF_NEED_WAKE);
  }
#endif
}
Example #28
/*! Perform system bind on the OS backing socket.
 * \param ni            Netif context
 * \param s             Socket state
 * \param ip_addr_be32  Local address to which to bind
 * \param port_be16     [in] requested port [out] assigned port
 * \return         0 - success & [port_be16] updated
 *                 CI_SOCKET_HANDOVER, Pass to OS, OS bound ok, (no error)
 *                 CI_SOCKET_ERROR & errno set
 */
ci_inline int __ci_bind(ci_netif *ni, ci_sock_cmn *s,
                        ci_uint32 ip_addr_be32, ci_uint16* port_be16 )
{
  int rc;
  ci_uint16 user_port; /* Port number specified by user, not by OS.
                        * See bug 4015 for details */
  union ci_sockaddr_u sa_u;

  ci_assert(s->domain == AF_INET || s->domain == AF_INET6);

  ci_assert( port_be16 );

  user_port = *port_be16;
#if CI_CFG_FAKE_IPV6
  ci_assert(s->domain == AF_INET || s->domain == AF_INET6);
  if( s->domain == AF_INET )
    ci_make_sockaddr(&sa_u.sin, s->domain, user_port, ip_addr_be32);
  else
    ci_make_sockaddr6(&sa_u.sin6, s->domain, user_port, ip_addr_be32);
#else
  ci_assert(s->domain == AF_INET);
  ci_make_sockaddr(&sa_u.sin, s->domain, user_port, ip_addr_be32);
#endif

#ifdef __ci_driver__
  rc = efab_tcp_helper_bind_os_sock(netif2tcp_helper_resource(ni),
                                    SC_SP(s),
                                    &sa_u.sa, sizeof(sa_u), port_be16);
#else
  rc = ci_tcp_helper_bind_os_sock(ni, SC_SP(s), &sa_u.sa,
                                  sizeof(sa_u), port_be16);
#endif

  /* bug1781: only do this if the earlier bind succeeded. 
   * check if we can handle this socket */
  if( rc != 0 )
    return rc;
  if( user_port != 0 )
    s->s_flags |= CI_SOCK_FLAG_PORT_BOUND;
  if( ip_addr_be32 != INADDR_ANY )
    s->s_flags |= CI_SOCK_FLAG_ADDR_BOUND;
  s->s_flags &= ~CI_SOCK_FLAG_CONNECT_MUST_BIND;

#ifndef __ci_driver__
  /* We do not call bind() to alien address from in-kernel code */
  if( ip_addr_be32 != INADDR_ANY &&
      !cicp_user_addr_is_local_efab(CICP_HANDLE(ni), &ip_addr_be32) )
    s->s_flags |= CI_SOCK_FLAG_BOUND_ALIEN;
#endif
  
  return rc;
}
Example #29
/* 2004/08/16 stg: added [fdt_locked] to allow the fd table to be
 * locked before this function.  [fdt_locked] = 0 for legacy operation
 */
void __citp_epinfo_ref_count_zero(citp_epinfo* epinfo,citp_fdinfo* last_fdinfo, 
				  int fdt_locked)
{
  Log_V(log(LPF "ref_count_zero(%p, %d)", epinfo, last_fdinfo->fd));

  ci_assert(epinfo);
  ci_assert(oo_atomic_read(&epinfo->ref_count) == 0);
  ci_assert(epinfo->protocol);
  ci_assert(last_fdinfo);
  ci_assert(last_fdinfo->ep == epinfo);

  epinfo->protocol->ops.dtor(epinfo, last_fdinfo, fdt_locked);
}
Example #30
static int oo_hw_filter_set_hwport(struct oo_hw_filter* oofilter, int hwport,
                                   int protocol,
                                   unsigned saddr, int sport,
                                   unsigned daddr, int dport,
                                   ci_uint16 vlan_id, unsigned src_flags)
{
  struct efx_filter_spec spec;
  int rc = 0, vi_id;

  ci_assert_equal(oofilter->thc, NULL);
  ci_assert(oofilter->filter_id[hwport] < 0);

  if( (vi_id = tcp_helper_rx_vi_id(oofilter->trs, hwport)) >= 0 ) {
    int flags = EFX_FILTER_FLAG_RX_SCATTER;
    int hw_rx_loopback_supported =
      tcp_helper_vi_hw_rx_loopback_supported(oofilter->trs, hwport);

    ci_assert( hw_rx_loopback_supported >= 0 );
    if( hw_rx_loopback_supported && (src_flags & OO_HW_SRC_FLAG_LOOPBACK) ) {
      flags |= EFX_FILTER_FLAG_TX;
    }

    efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED, flags, vi_id);

#if EFX_DRIVERLINK_API_VERSION >= 15
    {
      unsigned stack_id = tcp_helper_vi_hw_stack_id(oofilter->trs, hwport);
      ci_assert( stack_id >= 0 );
      efx_filter_set_stack_id(&spec, stack_id);
    }
#endif

    if( saddr != 0 )
      rc = efx_filter_set_ipv4_full(&spec, protocol, daddr, dport,
                                    saddr, sport);
    else
      rc = efx_filter_set_ipv4_local(&spec, protocol, daddr, dport);
    ci_assert_equal(rc, 0);
    /* note: bug 42561 affecting loopback on VLAN 0 with fw <= v4_0_6_6688 */
    if( vlan_id != OO_HW_VLAN_UNSPEC ) {
      rc = efx_filter_set_eth_local(&spec, vlan_id, NULL);
      ci_assert_equal(rc, 0);
    }
    rc = efrm_filter_insert(get_client(hwport), &spec, false);
    if( rc >= 0 ) {
      oofilter->filter_id[hwport] = rc;
      rc = 0;
    }
  }
  return rc;
}