Example #1
static citp_fdinfo* citp_epoll_dup(citp_fdinfo* orig_fdi)
{
  citp_fdinfo    *fdi;
  citp_epoll_fdi *epi;
  struct citp_epoll_fd* ep = fdi_to_epoll(orig_fdi);

  epi = CI_ALLOC_OBJ(citp_epoll_fdi);
  if (!epi)
    return NULL;

  fdi = &epi->fdinfo;
  citp_fdinfo_init(fdi, &citp_epoll_protocol_impl);
  epi->epoll = ep;
  oo_atomic_inc(&ep->refcount);  /* duplicated fd shares the epoll state */
  return fdi;
}
Example #2
static void tcp_helper_cluster_ref(tcp_helper_cluster_t* thc)
{
  oo_atomic_inc(&thc->thc_ref_count);
}
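tcp_helper_cluster_ref() only takes the reference. For context, the matching release side of this pattern would decrement the same counter and tear the cluster down once it reaches zero. A minimal sketch, assuming Onload's oo_atomic_dec_and_test() helper (non-zero when the count hits zero) and a hypothetical thc_cluster_free() teardown routine:

/* Sketch of the release half of the refcount pattern; thc_cluster_free()
 * is illustrative, not an actual Onload symbol. */
static void tcp_helper_cluster_unref(tcp_helper_cluster_t* thc)
{
  if( oo_atomic_dec_and_test(&thc->thc_ref_count) )
    thc_cluster_free(thc);  /* last reference dropped: free the cluster */
}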
Example #3
int
oo_iobufset_resource_alloc(struct oo_buffer_pages * pages, struct efrm_pd *pd,
                           struct oo_iobufset **iobrs_out, uint64_t *hw_addrs,
                           int reset_pending)
{
  struct oo_iobufset *iobrs;
  int rc;
  /* Cannot sleep in atomic or interrupt context, so fall back to GFP_ATOMIC. */
  int gfp_flag = (in_atomic() || in_interrupt()) ? GFP_ATOMIC : GFP_KERNEL;
  int size = sizeof(struct oo_iobufset) + pages->n_bufs * sizeof(dma_addr_t);
  int nic_order;
  void **addrs;
  unsigned int i;

  ci_assert(iobrs_out);
  ci_assert(pd);

  if( size <= PAGE_SIZE ) {
    iobrs = kmalloc(size, gfp_flag);
    if( iobrs == NULL )
      return -ENOMEM;
    iobrs->dma_addrs = (void *)(iobrs + 1);  /* array lives right after the struct */
  }
  else {
    /* Avoid multi-page allocations */
    iobrs = kmalloc(sizeof(struct oo_iobufset), gfp_flag);
    if( iobrs == NULL )
      return -ENOMEM;
    ci_assert_le(pages->n_bufs * sizeof(dma_addr_t), PAGE_SIZE);
    iobrs->dma_addrs = kmalloc(pages->n_bufs * sizeof(dma_addr_t), gfp_flag);
    if( iobrs->dma_addrs == NULL ) {
      kfree(iobrs);
      return -ENOMEM;
    }
  }

  oo_atomic_set(&iobrs->ref_count, 1);
  iobrs->pd = pd;
  iobrs->pages = pages;

  nic_order = EFHW_GFP_ORDER_TO_NIC_ORDER(compound_order(pages->pages[0]));

  ci_assert_le(sizeof(void *) * pages->n_bufs, PAGE_SIZE);
  addrs = kmalloc(sizeof(void *) * pages->n_bufs, gfp_flag);
  if( addrs == NULL ) {
    rc = -ENOMEM;
    goto fail;
  }

  for (i = 0; i < pages->n_bufs; i++) {
    addrs[i] = page_address(pages->pages[i]);
  }

  rc = efrm_pd_dma_map(iobrs->pd, pages->n_bufs, nic_order,
                       addrs, sizeof(addrs[0]),
                       &iobrs->dma_addrs[0], sizeof(iobrs->dma_addrs[0]),
                       hw_addrs, sizeof(hw_addrs[0]),
                       put_user_fake, &iobrs->buf_tbl_alloc, reset_pending);
  kfree(addrs);

  if( rc < 0 )
    goto fail;

  OO_DEBUG_VERB(ci_log("%s: [%p] %d pages", __FUNCTION__,
                       iobrs, iobrs->pages->n_bufs));

  efrm_resource_ref(efrm_pd_to_resource(pd));
  oo_atomic_inc(&pages->ref_count);
  *iobrs_out = iobrs;
  return 0;

fail:
  oo_iobufset_free_memory(iobrs);
  return rc;
}
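The fail path above assumes oo_iobufset_free_memory() can undo either allocation branch. Its body is not shown on this page; a sketch of what it has to do, inferred only from the allocation logic (dma_addrs is a separate allocation only when the structure did not fit in one page):

/* Sketch, not the real oo_iobufset_free_memory(): mirrors the two
 * allocation branches in oo_iobufset_resource_alloc() above. */
static void oo_iobufset_free_memory(struct oo_iobufset *iobrs)
{
  /* dma_addrs points just past the struct in the single-allocation case. */
  if( iobrs->dma_addrs != (void *)(iobrs + 1) )
    kfree(iobrs->dma_addrs);
  kfree(iobrs);
}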
Example #4
/* Allocates a new stack in thc.
 *
 * You need to efab_thr_release() the stack returned by this function
 * when done.
 *
 * You must hold the thc_mutex before calling this function.
 */
static int thc_alloc_thr(tcp_helper_cluster_t* thc,
                         int cluster_restart_opt,
                         const ci_netif_config_opts* ni_opts,
                         int ni_flags,
                         tcp_helper_resource_t** thr_out)
{
  int rc;
  tcp_helper_resource_t* thr_walk;
  ci_resource_onload_alloc_t roa;
  ci_netif_config_opts* opts;
  ci_netif* netif;

  memset(&roa, 0, sizeof(roa));

  if( (rc = thc_get_next_thr_name(thc, roa.in_name)) != 0 ) {
    /* All stack names are taken, i.e. the cluster is full.  Depending on
     * cluster_restart_opt, either kill an orphan or return an error. */
    if( thc_has_orphans(thc) == 1 ) {
      /* Checking for CITP_CLUSTER_RESTART_TERMINATE_ORPHANS */
      if( cluster_restart_opt == 1 ) {
        thc_kill_an_orphan(thc);
        rc = thc_get_next_thr_name(thc, roa.in_name);
        ci_assert_equal(rc, 0);
      }
      else {
        LOG_E(ci_log("%s: Clustered stack creation failed because of "
                     "orphans.  Either try again later or use "
                     "EF_CLUSTER_RESTART", __FUNCTION__));
        return rc;
      }
    }
    else {
      LOG_E(ci_log("%s: Stack creation failed because all instances in "
                   "cluster already allocated.", __FUNCTION__));
      return rc;
    }
  }
  roa.in_flags = ni_flags;
  strncpy(roa.in_version, ONLOAD_VERSION, sizeof(roa.in_version));
  strncpy(roa.in_uk_intf_ver, oo_uk_intf_ver, sizeof(roa.in_uk_intf_ver));
  if( (opts = kmalloc(sizeof(*opts), GFP_KERNEL)) == NULL )
    return -ENOMEM;
  memcpy(opts, ni_opts, sizeof(*opts));
  rc = tcp_helper_rm_alloc(&roa, opts, -1, thc, &thr_walk);
  kfree(opts);
  if( rc != 0 )
    return rc;

  /* Do not allow clustered stacks to do TCP loopback. */
  netif = &thr_walk->netif;
  if( NI_OPTS(netif).tcp_server_loopback != CITP_TCP_LOOPBACK_OFF ||
      NI_OPTS(netif).tcp_client_loopback != CITP_TCP_LOOPBACK_OFF )
    ci_log("%s: Disabling Unsupported TCP loopback on clustered stack.",
           __FUNCTION__);
  NI_OPTS(netif).tcp_server_loopback = NI_OPTS(netif).tcp_client_loopback =
    CITP_TCP_LOOPBACK_OFF;

  thr_walk->thc_tid      = current->pid;
  thr_walk->thc          = thc;
  thr_walk->thc_thr_next = thc->thc_thr_head;
  thc->thc_thr_head      = thr_walk;

  if( (thr_walk->thc->thc_flags & THC_FLAG_TPROXY) != 0 )
    netif->state->flags |= CI_NETIF_FLAG_SCALABLE_FILTERS_RSS;

  oo_atomic_inc(&thc->thc_ref_count);
  *thr_out = thr_walk;
  return 0;
}
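Taken together with the comment at the top of the function, a caller is expected to hold thc_mutex across the call and to drop the returned reference with efab_thr_release() once finished. A minimal caller sketch under those assumptions (the wrapper function and its error handling are illustrative, not Onload source):

/* Illustrative caller; locking follows the contract stated in the
 * comment above thc_alloc_thr(). */
static int thc_alloc_and_use(tcp_helper_cluster_t* thc,
                             const ci_netif_config_opts* ni_opts,
                             int ni_flags)
{
  tcp_helper_resource_t* thr;
  int rc;

  mutex_lock(&thc_mutex);   /* must hold thc_mutex over the allocation */
  rc = thc_alloc_thr(thc, CITP_CLUSTER_RESTART_TERMINATE_ORPHANS,
                     ni_opts, ni_flags, &thr);
  mutex_unlock(&thc_mutex);
  if( rc != 0 )
    return rc;

  /* ... use thr->netif ... */

  efab_thr_release(thr);    /* drop the reference taken for us */
  return 0;
}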