Example 1
void citp_fdtable_insert(citp_fdinfo* fdi, unsigned fd, int fdt_locked)
{
  ci_assert(fdi);
  ci_assert(fdi->protocol);
  ci_assert(citp_fdtable.inited_count > fd);
  ci_assert_ge(oo_atomic_read(&fdi->ref_count), 1);

  fdi->fd = fd;
  CI_DEBUG(fdi->on_ref_count_zero = FDI_ON_RCZ_NONE);
  fdi->is_special = 0;
  citp_fdtable_busy_clear(fd, fdi_to_fdip(fdi), fdt_locked);
}
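For context, citp_fdtable_insert() fills in the fdinfo and then calls citp_fdtable_busy_clear() to make the table entry visible to other threads. The fragment below is a minimal, self-contained sketch of that publish-after-initialise pattern using C11 atomics; the names (my_fdinfo, my_fdtable, MY_FDIP_BUSY, fd_table_publish) are hypothetical and are not part of the code above.

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

#define MY_FDTABLE_SIZE  1024
#define MY_FDIP_BUSY     ((uintptr_t) 1)   /* "slot being updated" marker */

struct my_fdinfo {
  int fd;
  int ref_count;
};

static _Atomic uintptr_t my_fdtable[MY_FDTABLE_SIZE];

/* Initialise the entry first, then publish it with a release exchange so
 * that readers doing an acquire load of the slot see a fully initialised
 * fdinfo.  The slot is expected to hold the busy marker until this point,
 * which is the rough analogue of citp_fdtable_busy_clear(). */
static void fd_table_publish(struct my_fdinfo* fdi, unsigned fd)
{
  uintptr_t prev;

  assert(fd < MY_FDTABLE_SIZE);
  assert(fdi->ref_count >= 1);
  fdi->fd = fd;
  prev = atomic_exchange_explicit(&my_fdtable[fd], (uintptr_t) fdi,
                                  memory_order_release);
  assert(prev == MY_FDIP_BUSY);
  (void) prev;
}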
Example 2
ssize_t linux_tcp_helper_fop_sendpage(struct file* filp, struct page* page, 
                                      int offset, size_t size,
                                      loff_t* ppos, int flags)
{
  ci_private_t* priv = filp->private_data;
  tcp_helper_resource_t* trs = efab_priv_to_thr(priv);
  ci_sock_cmn* s;

  OO_DEBUG_VERB(ci_log("%s: %d:%d offset=%d size=%d flags=%x", __FUNCTION__,
                       NI_ID(&trs->netif), OO_SP_FMT(priv->sock_id), offset,
                       (int) size, flags));

  ci_assert(page);
  ci_assert_ge(offset, 0);
  ci_assert_gt(size, 0);
  ci_assert_le(offset + size, CI_PAGE_SIZE);

#ifndef MSG_SENDPAGE_NOTLAST
  /* "flags" is really "more".  Convert it. */
  if( flags )
    flags = MSG_MORE;

  /* [more] is sometimes true even for the last page.  We get a little
  ** closer to the truth by spotting that we're not reading to the end of
  ** the page. - seen on 2.6.18, but not on 2.6.26 or later
  */
  if( offset + size < CI_PAGE_SIZE && flags )
    flags = 0;
#endif

  s = SP_TO_SOCK(&trs->netif, priv->sock_id);
  if(CI_LIKELY( s->b.state & CI_TCP_STATE_TCP_CONN ))
    return sendpage_copy(&trs->netif, SOCK_TO_TCP(s), page, offset, size,
                         flags);
  else
    /* Closed or listening.  Return epipe.  Do not send SIGPIPE, because
    ** Linux will do it for us. */
    return -s->tx_errno;
}
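The #ifndef MSG_SENDPAGE_NOTLAST branch above converts the boolean "more" argument into MSG_MORE and then drops it again when the caller is clearly not writing up to the end of the page. Below is a stand-alone sketch of that heuristic; more_to_msg_flags() and MY_PAGE_SIZE are hypothetical stand-ins (the driver uses CI_PAGE_SIZE) and are not part of the code above.

#include <stddef.h>
#include <sys/socket.h>   /* MSG_MORE */

#define MY_PAGE_SIZE 4096   /* stand-in for CI_PAGE_SIZE */

/* On kernels without MSG_SENDPAGE_NOTLAST the "flags" argument of
 * ->sendpage() is really a "more data follows" boolean.  Map it to
 * MSG_MORE, but clear it when the write obviously stops short of the end
 * of the page, since some kernels (seen on 2.6.18) pass "more" even for
 * the last page. */
static int more_to_msg_flags(int more, int offset, size_t size)
{
  int flags = more ? MSG_MORE : 0;

  if( offset + (int) size < MY_PAGE_SIZE )
    flags = 0;
  return flags;
}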
Example 3
/* On PPC there is no 32-bit sigaction - or rather, all sigaction calls are 32-bit.
 */
asmlinkage int
efab_linux_trampoline_sigaction32(int sig, const struct sigaction32 *act32,
                                  struct sigaction32 *oact32,
                                  unsigned int sigsetsize)
{
  struct sigaction act, oact;
  compat_sigset_t set32;
  int rc;
  struct mm_signal_data *tramp_data;
  int pass_to_kernel = 0;

  efab_syscall_enter();

  if( sigsetsize != sizeof(compat_sigset_t) ) {
    efab_syscall_exit();
    return -EINVAL;
  }

  /* Is it our process? */
  if( efab_signal_get_tramp_data(&tramp_data) ) {
    rc = efab_linux_sys_sigaction32(sig, act32, oact32);
    efab_syscall_exit();
    return rc;
  }

  /* Do not change UL data if we are in vfork child */
  if( act32 != NULL && tramp_data->kernel_sighand != current->sighand )
    pass_to_kernel = 1;

  if( act32 != NULL && !pass_to_kernel ) {
    compat_uptr_t handler, restorer;

    if( !access_ok(VERIFY_READ, act32, sizeof(*act32)) ||
        __get_user(handler, &act32->sa_handler) ||
        __get_user(act.sa_flags, &act32->sa_flags) ||
        __get_user(restorer, &act32->sa_restorer) ||
        __copy_from_user(&set32, &act32->sa_mask, sizeof(compat_sigset_t)) ) {
      efab_signal_put_tramp_data(tramp_data);
      efab_syscall_exit();
      return -EFAULT;
    }
    act.sa_handler = compat_ptr(handler);
    act.sa_restorer = compat_ptr(restorer);

    ci_assert_ge(_COMPAT_NSIG_WORDS, _NSIG_WORDS << 1);
    switch (_NSIG_WORDS) { /* Note: no break */
    case 4: act.sa_mask.sig[3] = set32.sig[6] | (((long)set32.sig[7]) << 32);
    case 3: act.sa_mask.sig[2] = set32.sig[4] | (((long)set32.sig[5]) << 32);
    case 2: act.sa_mask.sig[1] = set32.sig[2] | (((long)set32.sig[3]) << 32);
    case 1: act.sa_mask.sig[0] = set32.sig[0] | (((long)set32.sig[1]) << 32);
    }
  }

  rc = efab_signal_do_sigaction(sig,
                                (act32 && !pass_to_kernel) ? &act : NULL,
                                oact32 ? &oact : NULL, tramp_data,
                                &pass_to_kernel);
  efab_signal_put_tramp_data(tramp_data);
  if( pass_to_kernel )
    efab_linux_sys_sigaction32(sig, act32, NULL);

  if( rc == 0 && oact32 != NULL ) {
    switch (_NSIG_WORDS) { /* Note: no break */
    case 4:
      set32.sig[7] = (oact.sa_mask.sig[3] >> 32);
      set32.sig[6] = oact.sa_mask.sig[3];
    case 3:
      set32.sig[5] = (oact.sa_mask.sig[2] >> 32);
      set32.sig[4] = oact.sa_mask.sig[2];
    case 2:
      set32.sig[3] = (oact.sa_mask.sig[1] >> 32);
      set32.sig[2] = oact.sa_mask.sig[1];
    case 1:
      set32.sig[1] = (oact.sa_mask.sig[0] >> 32);
      set32.sig[0] = oact.sa_mask.sig[0];
    }

    if( !access_ok(VERIFY_WRITE, oact32, sizeof(*oact32)) ||
        __put_user(ptr_to_compat(oact.sa_handler), &oact32->sa_handler) ||
        __put_user(ptr_to_compat(oact.sa_restorer), &oact32->sa_restorer) ||
        __put_user(oact.sa_flags, &oact32->sa_flags) ||
        __copy_to_user(&oact32->sa_mask, &set32, sizeof(compat_sigset_t))) {
      efab_syscall_exit();
      return -EFAULT;
    }
  }

  efab_syscall_exit();
  return rc;
}
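The two fall-through switch statements above pack pairs of 32-bit compat sigset words into 64-bit native words on the way in and split them back on the way out. The snippet below illustrates just that arithmetic for a fixed word count; the names (sigset32_to_64, sigset64_to_32, NSIG_WORDS32/64) are hypothetical and chosen only for the illustration.

#include <stdint.h>

#define NSIG_WORDS64 2                    /* e.g. 128 signals / 64 bits */
#define NSIG_WORDS32 (NSIG_WORDS64 * 2)

/* Word i of the 64-bit set is 32-bit word 2*i in the low half OR'd with
 * word 2*i+1 shifted up by 32 bits -- the same packing as the first
 * switch in the sigaction32 handler above. */
static void sigset32_to_64(const uint32_t s32[NSIG_WORDS32],
                           uint64_t s64[NSIG_WORDS64])
{
  int i;
  for( i = 0; i < NSIG_WORDS64; ++i )
    s64[i] = (uint64_t) s32[2 * i] | ((uint64_t) s32[2 * i + 1] << 32);
}

/* The inverse, matching the second switch: split each 64-bit word back
 * into its two 32-bit halves before copying out to userspace. */
static void sigset64_to_32(const uint64_t s64[NSIG_WORDS64],
                           uint32_t s32[NSIG_WORDS32])
{
  int i;
  for( i = 0; i < NSIG_WORDS64; ++i ) {
    s32[2 * i]     = (uint32_t) s64[i];
    s32[2 * i + 1] = (uint32_t) (s64[i] >> 32);
  }
}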
Example 4
int citp_pipe_splice_write(citp_fdinfo* fdi, int alien_fd, loff_t* alien_off,
                           size_t olen, int flags,
                           citp_lib_context_t* lib_context)
{
  citp_pipe_fdi* epi = fdi_to_pipe_fdi(fdi);
  int len_in_bufs = OO_PIPE_SIZE_TO_BUFS(olen);
  struct iovec iov_on_stack[CITP_PIPE_SPLICE_WRITE_STACK_IOV_LEN];
  struct iovec* iov = iov_on_stack;
  int want_buf_count;
  int rc;
  int bytes_to_read;
  int len = olen;
  int no_more = 1; /* for now we only run a single loop */
  int written_total = 0;
  int non_block = (flags & SPLICE_F_NONBLOCK) || (epi->pipe->aflags &
      (CI_PFD_AFLAG_NONBLOCK << CI_PFD_AFLAG_WRITER_SHIFT));
  if( fdi_is_reader(fdi) ) {
    errno = EINVAL;
    return -1;
  }
  if( alien_off ) {
    /* TODO support this */
    errno = ENOTSUP;
    return -1;
  }
  do {
    int count;
    int iov_num;
    int bytes_to_write;
    struct ci_pipe_pkt_list pkts = {};
    struct ci_pipe_pkt_list pkts2;
    want_buf_count = len_in_bufs;
    /* We might need to wait for buffers here on the first iteration */
    rc = ci_pipe_zc_alloc_buffers(epi->ni, epi->pipe, want_buf_count,
                                  MSG_NOSIGNAL | (non_block || written_total ?
                                  MSG_DONTWAIT : 0),
                                  &pkts);
    if( rc < 0 && written_total ) {
      /* whatever the error, we need to report the bytes already written */
      rc = written_total;
      break;
    }
    else if( rc < 0 )
      break;
    else if( pkts.count == 0 && non_block ) {
      errno = EAGAIN;
      rc = -1;
      break;
    }
    else
      ci_assert_gt(pkts.count, 0);
    count = pkts.count;

    if( count > CITP_PIPE_SPLICE_WRITE_STACK_IOV_LEN ) {
      void* niov = realloc(iov == iov_on_stack ? NULL : iov,
                           sizeof(*iov) * len_in_bufs);
      if( niov == NULL )
        /* we can still move quite a few pkts */
        count = CITP_PIPE_SPLICE_WRITE_STACK_IOV_LEN;
      else
        iov = niov;
    }

    ci_assert_ge(count, 1);

    iov_num = count;
    pkts2 = pkts;
    bytes_to_read = ci_pipe_list_to_iovec(epi->ni, epi->pipe, iov, &iov_num,
                                          &pkts2, len);

    citp_exit_lib_if(lib_context, TRUE);
    /* Note: the following call might be non-blocking as well as blocking */
    rc = readv(alien_fd, iov, count);
    citp_reenter_lib(lib_context);

    if( rc > 0 ) {
      bytes_to_write = rc;
      written_total += bytes_to_write;
      len -= bytes_to_write;
      no_more |= bytes_to_write < bytes_to_read;
    }
    else {
      bytes_to_write = 0;
      no_more = 1;
    }

    {
      /* pipe zc_write will write non_empty buffers and release the empty
       * ones */
      int rc2 = ci_pipe_zc_write(epi->ni, epi->pipe, &pkts, bytes_to_write,
                  CI_PIPE_ZC_WRITE_FLAG_FORCE | MSG_DONTWAIT | MSG_NOSIGNAL);
      (void) rc2;
      ci_assert_equal(rc2, bytes_to_write);
    }
    /* For now we do not attempt a second iteration.  To allow that we would
     * need a guarantee that the read cannot block, e.g. insight into the
     * type of fd and a non-blocking operation to use on it
     * (to name a valid case: socket, recvmsg). */
  } while( ! no_more );

  if( iov != iov_on_stack )
    free(iov);
  if( rc > 0 )
    return written_total;
  if( rc < 0 && errno == EPIPE && ! (flags & MSG_NOSIGNAL) ) {
    ci_sys_ioctl(ci_netif_get_driver_handle(epi->ni),
                 OO_IOC_KILL_SELF_SIGPIPE, NULL);
  }
  return rc;
}
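citp_pipe_splice_write() keeps a small iovec array on the stack and only moves to the heap when more pipe buffers are available than fit there; the realloc result must be assigned back to iov, and the stack array must never be passed to free(). The fragment below is a minimal sketch of that grow-or-degrade pattern; grow_iov() and STACK_IOV_LEN are hypothetical stand-ins for the code above.

#include <stdlib.h>
#include <sys/uio.h>

#define STACK_IOV_LEN 64  /* stand-in for CITP_PIPE_SPLICE_WRITE_STACK_IOV_LEN */

/* Grow the iovec array to 'wanted' entries when 'count' exceeds the stack
 * capacity.  On allocation failure, keep the stack array and clamp *count
 * instead of failing outright (fewer packets are moved per iteration).
 * The old contents are not preserved; the caller refills the array after
 * this call.  Returns the array to use from now on. */
static struct iovec* grow_iov(struct iovec* iov, struct iovec* iov_on_stack,
                              int wanted, int* count)
{
  if( *count > STACK_IOV_LEN ) {
    void* niov = realloc(iov == iov_on_stack ? NULL : iov,
                         sizeof(*iov) * wanted);
    if( niov == NULL )
      *count = STACK_IOV_LEN;
    else
      iov = niov;
  }
  return iov;
}

/* Caller pattern, mirroring the loop above:
 *
 *   struct iovec iov_on_stack[STACK_IOV_LEN];
 *   struct iovec* iov = iov_on_stack;
 *   ...
 *   iov = grow_iov(iov, iov_on_stack, len_in_bufs, &count);
 *   ...
 *   if( iov != iov_on_stack )
 *     free(iov);
 */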