static int ComputeFragmentSizes(const NaClSrpcMessageHeader* header,
                                enum FragmentPosition fragment_position,
                                LengthHeader* fragment_size) {
  size_t byte_count;
  size_t max_user_bytes;

  if (0 == NaClSrpcMaxImcSendmsgSize) {
    NaClSrpcLog(NACL_SRPC_LOG_ERROR,
                "ComputeFragmentSizes: NaClSrpcModuleInit not called.\n");
    return 0;
  }
  /* NaClSrpcMaxImcSendmsgSize is guaranteed to be large enough to avoid underflow. */
  max_user_bytes =
      NaClSrpcMaxImcSendmsgSize - kFragmentOverhead[fragment_position];
  byte_count = (size_t)
      HeaderTotalBytes(header, kFragmentHeaderCount[fragment_position]);
  if (-1 == (ssize_t) byte_count) {
    NaClSrpcLog(NACL_SRPC_LOG_ERROR,
                "ComputeFragmentSizes: byte_count was incorrect.\n");
    return 0;
  }
  /* NaClSrpcMaxImcSendmsgSize <= NACL_ABI_SIZE_T_MAX, so cast is safe. */
  fragment_size->byte_count = (nacl_abi_size_t)
      size_min(byte_count, max_user_bytes);
  /* SRPC_DESC_MAX <= NACL_ABI_SIZE_T_MAX, so cast is safe. */
  fragment_size->desc_count = (nacl_abi_size_t)
      size_min(SRPC_DESC_MAX, header->NACL_SRPC_MESSAGE_HEADER_DESC_LENGTH);
  return 1;
}
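
Every snippet on this page revolves around the same small helper. Its exact definition differs from project to project (macro, inline function, or plain function), so the following is only a minimal sketch of the assumed semantics, not any project's actual header:

#include <stddef.h>

/* Minimal sketch of the helper the examples below rely on:
 * the smaller of two size_t values. */
static size_t size_min(size_t a, size_t b)
{
  return (a < b) ? a : b;
}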
Example #2
template <typename E, typename Fn>
void test_expression(const E& expr, size_t size, Fn&& fn, const char* expression = nullptr)
{
    using T                  = value_type_of<E>;
    ::testo::test_case* test = ::testo::active_test();
    auto&& c                 = ::testo::make_comparison();
    test->check(c <= expr.size() == size, expression);
    if (expr.size() != size)
        return;
    size                     = size_min(size, 200);
    constexpr size_t maxsize = 2 + ilog2(vector_width<T> * 2);
    for (size_t i = 0; i < size;)
    {
        const size_t next_size =
            std::min(prev_poweroftwo(size - i), static_cast<size_t>(1) << (std::rand() % maxsize));

        cswitch(csize<1> << csizeseq<maxsize>, next_size, [&](auto x) {
            constexpr size_t nsize = val_of(decltype(x)());
            ::testo::scope s(as_string("i = ", i));
            test->check(c <= get_elements(expr, cinput, i, vec_shape<T, nsize>()) ==
                            internal::get_fn_value<T, nsize>(i, fn),
                        expression);
        });
        i += next_size;
    }
}
static void ConsumeFragment(NaClSrpcMessageHeader* header,
                            LengthHeader* fragment_size,
                            size_t guaranteed_entries) {
  size_t descs_read;
  /*
   * The caller has already checked that the number of bytes read is sufficient
   * to ensure that the first "guaranteed_entries" iov entries were satisfied.
   * guaranteed_entries is passed as a constant 1 or 2, so cast is safe.
   */
  header->iov += (nacl_abi_size_t) guaranteed_entries;
  header->iov_length -= (nacl_abi_size_t) guaranteed_entries;
  /* Update to reflect the fragment's descriptors that were consumed. */
  descs_read =
      size_min(fragment_size->desc_count,
               header->NACL_SRPC_MESSAGE_HEADER_DESC_LENGTH);
  /* Post-condition: descs_read <= NACL_ABI_SIZE_T_MAX. */
  header->NACL_SRPC_MESSAGE_HEADER_DESC_LENGTH -= (nacl_abi_size_t) descs_read;
  header->NACL_SRPC_MESSAGE_HEADER_DESCV += (nacl_abi_size_t) descs_read;
  fragment_size->desc_count -= (nacl_abi_size_t) descs_read;
  /*
   * Update the header and iov vector to reflect which entries are already
   * satisfied.
   */
  while ((header->iov_length > 0) && (fragment_size->byte_count > 0)) {
    size_t bytes_for_this_entry;
    bytes_for_this_entry =
        size_min(header->iov[0].length, fragment_size->byte_count);
    /* Post-condition: bytes_for_this_entry <= NACL_ABI_SIZE_T_MAX. */
    header->iov[0].length -= (nacl_abi_size_t) bytes_for_this_entry;
    fragment_size->byte_count -= (nacl_abi_size_t) bytes_for_this_entry;
    if (header->iov[0].length > 0) {
      /* The fragment was exhausted, but didn't satisfy this iov entry. */
      header->iov[0].base = (char*) header->iov[0].base + bytes_for_this_entry;
      break;
    }
    /* This iov entry was satisfied. Remove it from the vector. */
    header->iov++;
    header->iov_length--;
  }
  if (fragment_size->byte_count > 0) {
    header->flags |= NACL_ABI_RECVMSG_DATA_TRUNCATED;
  }
  if (fragment_size->desc_count > 0) {
    header->flags |= NACL_ABI_RECVMSG_DESC_TRUNCATED;
  }
}
static int BuildFragmentHeader(NaClSrpcMessageHeader* header,
                               LengthHeader* fragment_size,
                               size_t entries_to_skip,
                               NaClSrpcMessageHeader* frag_hdr) {
  size_t i;
  size_t total_bytes = 0;
  const size_t kMaxIovEntries = SIZE_T_MAX / sizeof *frag_hdr->iov;

  if (NACL_ABI_SIZE_T_MAX < header->iov_length) {
    NaClSrpcLog(NACL_SRPC_LOG_ERROR,
                "BuildFragmentHeader: iov_length too large.\n");
    return 0;
  }
  /* Copy the entire iovec, even though only part may be used. */
  frag_hdr->iov_length = header->iov_length;
  if (kMaxIovEntries < header->iov_length) {
    NaClSrpcLog(NACL_SRPC_LOG_ERROR,
                "BuildFragmentHeader: iov_length > kMaxIovEntries.\n");
    return 0;
  }
  frag_hdr->iov = (struct NaClImcMsgIoVec*)
      malloc(header->iov_length * sizeof *frag_hdr->iov);
  if (frag_hdr->iov == NULL) {
    NaClSrpcLog(NACL_SRPC_LOG_ERROR,
                "BuildFragmentHeader: iov malloc failed.\n");
    return 0;
  }
  memcpy(frag_hdr->iov,
         header->iov,
         frag_hdr->iov_length * sizeof *frag_hdr->iov);
  /* Update the iov[i].length entries. */
  for (i = entries_to_skip; i < header->iov_length; ++i) {
    size_t bytes_this_iov =
        size_min(fragment_size->byte_count - total_bytes,
                 frag_hdr->iov[i].length);
    if (bytes_this_iov == 0) {
      /* header->iov_length was checked at entry to make this safe. */
      frag_hdr->iov_length = (nacl_abi_size_t) i;
    }
    frag_hdr->iov[i].length = bytes_this_iov;
    /* Ensure that total_bytes increment doesn't overflow. */
    if (SIZE_T_MAX - bytes_this_iov < total_bytes) {
      NaClSrpcLog(NACL_SRPC_LOG_ERROR,
                  "BuildFragmentHeader: total bytes overflows.\n");
      return 0;
    }
    total_bytes += bytes_this_iov;
  }
  frag_hdr->NACL_SRPC_MESSAGE_HEADER_DESCV =
      header->NACL_SRPC_MESSAGE_HEADER_DESCV;
  frag_hdr->NACL_SRPC_MESSAGE_HEADER_DESC_LENGTH = fragment_size->desc_count;
  return 1;
}
Example #5
Window::Window(const String & in) :
    _play(false),
    _frame(0),
    _idle_init(false),
    _idle_frame(0)
{
    DJV_DEBUG(String_Format("Window::Window(%%)").arg(int64_t(this)));

    // Initialize.

    try
    {
        _movie.init(in);
    }
    catch (Error in)
    {
        DJV_AUDIO_APP->error(in);
    }

    _widget = new Image_View;
    _play_widget = new Tool_Button("playback_forward");
    _play_widget->type(Tool_Button::TOGGLE);
    _slider = new Int_Slider;

    Layout_V * layout = new Layout_V(this);
    layout->margin(0);
    layout->spacing(0);
    layout->add(_widget);
    layout->stretch(_widget);
    Layout_H * layout_h = new Layout_H(layout);
    layout_h->margin(0);
    layout_h->add(_play_widget);
    layout_h->add(_slider);
    layout_h->stretch(_slider);

    _slider->range(0, static_cast<int>(_movie.info_time().list.size()) - 1);

    dirty();
    size(_movie.info().size + V2i(0, size_min().y));

    frame_update();
    play_update();

    _play_widget->signal.set(this, play_callback);
    _slider->signal.set(this, frame_callback);
    close_signal.set(this, close_callback);

    show();
}
Example #6
void dstr_ncat_dstr(struct dstr *dst, const struct dstr *str, const size_t len)
{
	size_t new_len, in_len;
	if (!str->array || !*str->array || !len)
		return;

	in_len = size_min(len, str->len);
	new_len = dst->len + in_len;

	dstr_ensure_capacity(dst, new_len + 1);
	memcpy(dst->array+dst->len, str->array, in_len);

	dst->len = new_len;
	dst->array[new_len] = 0;
}
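
A hedged usage sketch: it assumes the libobs-style dstr helpers (dstr_copy and dstr_free) and header path, which this page does not show; the point is that size_min(len, str->len) clamps the append to what the source string actually holds:

#include "util/dstr.h"   /* assumed header providing struct dstr and its helpers */

static void append_clamped_example(void)
{
	struct dstr dst = {0};
	struct dstr src = {0};

	dstr_copy(&dst, "hello ");
	dstr_copy(&src, "world, plus text we do not want");

	/* Ask for 5 bytes; size_min(5, src.len) keeps the copy inside src. */
	dstr_ncat_dstr(&dst, &src, 5);   /* dst.array is now "hello world" */

	dstr_free(&src);
	dstr_free(&dst);
}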
Example #7
void dstr_ncopy_dstr(struct dstr *dst, const struct dstr *str, const size_t len)
{
	size_t newlen;

	if (dst->array)
		dstr_free(dst);

	if (!len)
		return;

	newlen = size_min(len, str->len);
	dst->array = bmemdup(str->array, newlen + 1);
	dst->len   = newlen;

	dst->array[newlen] = 0;
}
Example #8
/* fills "absent" positions in range specification based on response body size 
 * returns true if the range is still valid
 * range is valid if its intersection with [0,length-1] is not empty
 */
static int
httpHdrRangeSpecCanonize(HttpHdrRangeSpec * spec, size_t clen)
{
    debug(64, 5) ("httpHdrRangeSpecCanonize: have: [%d, %d) len: %d\n",
	spec->offset, spec->offset + spec->length, spec->length);
    if (!known_spec(spec->offset))	/* suffix */
	spec->offset = size_diff(clen, spec->length);
    else if (!known_spec(spec->length))		/* trailer */
	spec->length = size_diff(clen, spec->offset);
    /* we have a "range" now, adjust length if needed */
    assert(known_spec(spec->length));
    assert(known_spec(spec->offset));
    spec->length = size_min(size_diff(clen, spec->offset), spec->length);
    /* check range validity */
    debug(64, 5) ("httpHdrRangeSpecCanonize: done: [%d, %d) len: %d\n",
	spec->offset, spec->offset + spec->length, spec->length);
    return spec->length > 0;
}
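
A short worked example of the three branches above, assuming clen = 1000 and that size_diff() saturates at zero instead of underflowing (an assumption about that helper, not something shown in this snippet):

/*
 *   suffix  "-100"     : offset absent -> offset = size_diff(1000, 100) = 900; range [900, 1000)
 *   trailer "500-"     : length absent -> length = size_diff(1000, 500) = 500; range [500, 1000)
 *   long    "900-1999" : length = size_min(size_diff(1000, 900), 1100) = 100;  range [900, 1000)
 *   beyond  "2000-"    : length = size_diff(1000, 2000) = 0, so the spec is rejected (returns 0)
 */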
Example #9
int
kk_vector_realloc (kk_vector* vec, size_t cap)
{
  if (vec == NULL)
    {
      errno = EINVAL;
      return -1;
    }
  void *newptr = realloc (vec->mem, cap);
  if (newptr == NULL)
    {
      return -1;
    }
  vec->mem = newptr;
  vec->cap = cap;
  vec->len = size_min(vec->len, cap);
  return 0;
}
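
A minimal usage sketch. The struct layout below is assumed from the fields the function touches (mem, cap, len); the project's real header may declare it differently:

#include <stddef.h>
#include <stdlib.h>

/* Assumed layout, inferred from the fields used by kk_vector_realloc. */
typedef struct kk_vector {
  void  *mem;
  size_t cap;
  size_t len;
} kk_vector;

int kk_vector_realloc (kk_vector* vec, size_t cap);

/* Shrinking a vector: on success cap becomes new_cap and len is clamped to it. */
static int shrink_to (kk_vector *vec, size_t new_cap)
{
  if (kk_vector_realloc (vec, new_cap) != 0)
    return -1;   /* errno set by kk_vector_realloc or realloc */
  return 0;
}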
Example #10
/* Attempts to read the specified number of bytes from the stream, returning the
 * number of bytes read. */
no_ignore size_t
ubik_stream_read(void *dst, struct ubik_stream *src, size_t len)
{
        size_t n;
        switch (src->stream_type)
        {
        case STREAM_TYPE_FILE_R:
                return fread(dst, 1, len, src->file);
        case STREAM_TYPE_FILE_W:
                return 0;
        case STREAM_TYPE_BUFFER:
                n = size_min(len, src->buffer->end - src->buffer->read);
                memcpy(dst, src->buffer->read, n);
                src->buffer->read += n;
                return n;
        case STREAM_TYPE_GENERATOR:
                if (src->gen->read != NULL)
                        return src->gen->read(dst, src->gen, len);
                return 0;
        }
        return 0;
}
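
The STREAM_TYPE_BUFFER branch is the classic bounded-copy pattern: clamp the request to the unread region, copy, then advance the cursor. Here is a standalone sketch of the same pattern outside the ubik types (hypothetical names, not the library's API):

#include <stddef.h>
#include <string.h>

struct byte_cursor {
        const char *read;   /* next unread byte */
        const char *end;    /* one past the last valid byte */
};

static size_t cursor_read(void *dst, struct byte_cursor *c, size_t len)
{
        size_t available = (size_t)(c->end - c->read);
        size_t n = (len < available) ? len : available;   /* size_min(len, available) */
        memcpy(dst, c->read, n);
        c->read += n;
        return n;
}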
Example #11
no_ignore size_t
ubik_stream_drop(struct ubik_stream *src, size_t len)
{
        size_t n;
        switch (src->stream_type)
        {
        case STREAM_TYPE_FILE_R:
                if (fseek(src->file, len, SEEK_CUR) == 0)
                        return len;
                return 0;
        case STREAM_TYPE_FILE_W:
                return 0;
        case STREAM_TYPE_BUFFER:
                n = size_min(len, src->buffer->write - src->buffer->read);
                src->buffer->read += n;
                return n;
        case STREAM_TYPE_GENERATOR:
                if (src->gen->drop != NULL)
                        return src->gen->drop(src->gen, len);
                return 0;
        }
        return 0;
}
Example #12
/* Warning: sizeof(nacl_abi_off_t)!=sizeof(off_t) on OSX */
int32_t NaClSysMmapIntern(struct NaClApp        *nap,
                          void                  *start,
                          size_t                length,
                          int                   prot,
                          int                   flags,
                          int                   d,
                          nacl_abi_off_t        offset) {
  int                         allowed_flags;
  struct NaClDesc             *ndp;
  uintptr_t                   usraddr;
  uintptr_t                   usrpage;
  uintptr_t                   sysaddr;
  uintptr_t                   endaddr;
  int                         mapping_code;
  uintptr_t                   map_result;
  int                         holding_app_lock;
  struct nacl_abi_stat        stbuf;
  size_t                      alloc_rounded_length;
  nacl_off64_t                file_size;
  nacl_off64_t                file_bytes;
  nacl_off64_t                host_rounded_file_bytes;
  size_t                      alloc_rounded_file_bytes;
  uint32_t                    val_flags;

  holding_app_lock = 0;
  ndp = NULL;

  allowed_flags = (NACL_ABI_MAP_FIXED | NACL_ABI_MAP_SHARED
                   | NACL_ABI_MAP_PRIVATE | NACL_ABI_MAP_ANONYMOUS);

  usraddr = (uintptr_t) start;

  if (0 != (flags & ~allowed_flags)) {
    NaClLog(2, "invalid mmap flags 0%o, ignoring extraneous bits\n", flags);
    flags &= allowed_flags;
  }

  if (0 != (flags & NACL_ABI_MAP_ANONYMOUS)) {
    /*
     * anonymous mmap, so backing store is just swap: no descriptor is
     * involved, and no memory object will be created to represent the
     * descriptor.
     */
    ndp = NULL;
  } else {
    ndp = NaClAppGetDesc(nap, d);
    if (NULL == ndp) {
      map_result = -NACL_ABI_EBADF;
      goto cleanup;
    }
  }

  mapping_code = 0;
  /*
   * Check if application is trying to do dynamic code loading by
   * mmaping a file.
   */
  if (0 != (NACL_ABI_PROT_EXEC & prot) &&
      0 != (NACL_ABI_MAP_FIXED & flags) &&
      NULL != ndp &&
      NaClSysCommonAddrRangeInAllowedDynamicCodeSpace(nap, usraddr, length)) {
    if (!nap->enable_dyncode_syscalls) {
      NaClLog(LOG_WARNING,
              "NaClSysMmap: PROT_EXEC when dyncode syscalls are disabled.\n");
      map_result = -NACL_ABI_EINVAL;
      goto cleanup;
    }
    if (0 != (NACL_ABI_PROT_WRITE & prot)) {
      NaClLog(3,
              "NaClSysMmap: asked for writable and executable code pages?!?\n");
      map_result = -NACL_ABI_EINVAL;
      goto cleanup;
    }
    mapping_code = 1;
  } else if (0 != (prot & NACL_ABI_PROT_EXEC)) {
    map_result = -NACL_ABI_EINVAL;
    goto cleanup;
  }

  /*
   * Starting address must be aligned to worst-case allocation
   * granularity.  (Windows.)
   */
  if (!NaClIsAllocPageMultiple(usraddr)) {
    if ((NACL_ABI_MAP_FIXED & flags) != 0) {
      NaClLog(2, "NaClSysMmap: address not allocation granularity aligned\n");
      map_result = -NACL_ABI_EINVAL;
      goto cleanup;
    } else {
      NaClLog(2, "NaClSysMmap: Force alignment of misaligned hint address\n");
      usraddr = NaClTruncAllocPage(usraddr);
    }
  }
  /*
   * Offset should be non-negative (nacl_abi_off_t is signed).  This
   * condition is caught when the file is stat'd and checked, and
   * offset is ignored for anonymous mappings.
   */
  if (offset < 0) {
    NaClLog(1,  /* application bug */
            "NaClSysMmap: negative file offset: %"NACL_PRId64"\n",
            (int64_t) offset);
    map_result = -NACL_ABI_EINVAL;
    goto cleanup;
  }
  /*
   * And offset must be a multiple of the allocation unit.
   */
  if (!NaClIsAllocPageMultiple((uintptr_t) offset)) {
    NaClLog(1,
            ("NaClSysMmap: file offset 0x%08"NACL_PRIxPTR" not multiple"
             " of allocation size\n"),
            (uintptr_t) offset);
    map_result = -NACL_ABI_EINVAL;
    goto cleanup;
  }

  /*
   * Round up to a page size multiple.
   *
   * Note that if length > 0xffff0000 (i.e. -NACL_MAP_PAGESIZE), rounding
   * up the length will wrap around to 0.  We check for length == 0 *after*
   * rounding up the length to simultaneously check for the length
   * originally being 0 and check for the wraparound.
   */
  alloc_rounded_length = NaClRoundAllocPage(length);
  if (alloc_rounded_length != length) {
    if (mapping_code) {
      NaClLog(3, "NaClSysMmap: length not a multiple of allocation size\n");
      map_result = -NACL_ABI_EINVAL;
      goto cleanup;
    }
    NaClLog(1,
            "NaClSysMmap: rounded length to 0x%"NACL_PRIxS"\n",
            alloc_rounded_length);
  }
  if (0 == (uint32_t) alloc_rounded_length) {
    map_result = -NACL_ABI_EINVAL;
    goto cleanup;
  }
  /*
   * Sanity check in case any later code behaves badly if
   * |alloc_rounded_length| is >=4GB.  This check shouldn't fail
   * because |length| was <4GB and we've already checked for overflow
   * when rounding it up.
   * TODO(mseaborn): Remove the need for this by using uint32_t for
   * untrusted sizes more consistently.
   */
  CHECK(alloc_rounded_length == (uint32_t) alloc_rounded_length);

  if (NULL == ndp) {
    /*
     * Note: sentinel values are bigger than the NaCl module addr space.
     */
    file_size                = kMaxUsableFileSize;
    file_bytes               = kMaxUsableFileSize;
    host_rounded_file_bytes  = kMaxUsableFileSize;
    alloc_rounded_file_bytes = kMaxUsableFileSize;
  } else {
    /*
     * We stat the file to figure out its actual size.
     *
     * This is necessary because the POSIXy interface we provide
     * allows mapping beyond the extent of a file but Windows'
     * interface does not.  We simulate the POSIX behaviour on
     * Windows.
     */
    map_result = (*((struct NaClDescVtbl const *) ndp->base.vtbl)->
                  Fstat)(ndp, &stbuf);
    if (0 != map_result) {
      goto cleanup;
    }

    /*
     * Preemptively refuse to map anything that's not a regular file or
     * shared memory segment.  Other types usually report st_size of zero,
     * which the code below will handle by just doing a dummy PROT_NONE
     * mapping for the requested size and never attempting the underlying
     * NaClDesc Map operation.  So without this check, the host OS never
     * gets the chance to refuse the mapping operation on an object that
     * can't do it.
     */
    if (!NACL_ABI_S_ISREG(stbuf.nacl_abi_st_mode) &&
        !NACL_ABI_S_ISSHM(stbuf.nacl_abi_st_mode)) {
      map_result = -NACL_ABI_ENODEV;
      goto cleanup;
    }

    /*
     * BUG(bsy): there's a race between this fstat and the actual mmap
     * below.  It's probably insoluble.  Even if we fstat again after
     * mmap and compared, the mmap could have "seen" the file with a
     * different size, after which the racing thread restored back to
     * the same value before the 2nd fstat takes place.
     */
    file_size = stbuf.nacl_abi_st_size;

    if (file_size < offset) {
      map_result = -NACL_ABI_EINVAL;
      goto cleanup;
    }

    file_bytes = file_size - offset;
    if ((nacl_off64_t) kMaxUsableFileSize < file_bytes) {
      host_rounded_file_bytes = kMaxUsableFileSize;
    } else {
      host_rounded_file_bytes = NaClRoundHostAllocPage((size_t) file_bytes);
    }

    ASSERT(host_rounded_file_bytes <= (nacl_off64_t) kMaxUsableFileSize);
    /*
     * We need to deal with NaClRoundHostAllocPage rounding up to zero
     * from ~0u - n, where n < 4096 or 65536 (== 1 alloc page).
     *
     * Luckily, file_bytes is at most kMaxUsableFileSize which is
     * smaller than SIZE_T_MAX, so it should never happen, but we
     * leave the explicit check below as defensive programming.
     */
    alloc_rounded_file_bytes =
      NaClRoundAllocPage((size_t) host_rounded_file_bytes);

    if (0 == alloc_rounded_file_bytes && 0 != host_rounded_file_bytes) {
      map_result = -NACL_ABI_ENOMEM;
      goto cleanup;
    }

    /*
     * NB: host_rounded_file_bytes and alloc_rounded_file_bytes can be
     * zero.  Such an mmap just makes memory (offset relative to
     * usraddr) in the range [0, alloc_rounded_length) inaccessible.
     */
  }

  /*
   * host_rounded_file_bytes is how many bytes we can map from the
   * file, given the user-supplied starting offset.  It is at least
   * one page.  If it came from a real file, it is a multiple of
   * host-OS allocation size.  It cannot be larger than
   * kMaxUsableFileSize.
   */
  if (mapping_code && (size_t) file_bytes < alloc_rounded_length) {
    NaClLog(3,
            "NaClSysMmap: disallowing partial allocation page extension for"
            " short files\n");
    map_result = -NACL_ABI_EINVAL;
    goto cleanup;
  }
  length = size_min(alloc_rounded_length, (size_t) host_rounded_file_bytes);

  /*
   * Lock the addr space.
   */
  NaClXMutexLock(&nap->mu);

  NaClVmHoleOpeningMu(nap);

  holding_app_lock = 1;

  if (0 == (flags & NACL_ABI_MAP_FIXED)) {
    /*
     * The user wants us to pick an address range.
     */
    if (0 == usraddr) {
      /*
       * Pick a hole in addr space of appropriate size, anywhere.
       * We pick one that's best for the system.
       */
      usrpage = NaClVmmapFindMapSpace(&nap->mem_map,
                                      alloc_rounded_length >> NACL_PAGESHIFT);
      NaClLog(4, "NaClSysMmap: FindMapSpace: page 0x%05"NACL_PRIxPTR"\n",
              usrpage);
      if (0 == usrpage) {
        map_result = -NACL_ABI_ENOMEM;
        goto cleanup;
      }
      usraddr = usrpage << NACL_PAGESHIFT;
      NaClLog(4, "NaClSysMmap: new starting addr: 0x%08"NACL_PRIxPTR
              "\n", usraddr);
    } else {
Example #13
/* Use the predecessors in the given map to write the BFS levels to the high 16
 * bits of each element in pred; this also catches some problems in pred
 * itself.  Returns true if the predecessor map is valid. */
static int build_bfs_depth_map(const int64_t nglobalverts, const size_t nlocalverts, const size_t maxlocalverts, const int64_t root, int64_t* const pred) {
  (void)nglobalverts;
  int validation_passed = 1;
  int root_owner;

  size_t root_local;
  get_vertex_distribution_for_pred(1, &root, &root_owner, &root_local);
  int root_is_mine = (root_owner == rank);
  if (root_is_mine) assert (root_local < nlocalverts);

  {
    ptrdiff_t i;
#pragma omp parallel for
    for (i = 0; i < (ptrdiff_t)nlocalverts; ++i) write_pred_entry_depth(&pred[i], UINT16_MAX);
    if (root_is_mine) write_pred_entry_depth(&pred[root_local], 0);
  }
  int64_t* restrict pred_pred = (int64_t*)xMPI_Alloc_mem(size_min(CHUNKSIZE, nlocalverts) * sizeof(int64_t)); /* Predecessor info of predecessor vertex for each local vertex */
  gather* pred_win = init_gather((void*)pred, nlocalverts, sizeof(int64_t), pred_pred, size_min(CHUNKSIZE, nlocalverts), size_min(CHUNKSIZE, nlocalverts), MPI_INT64_T);
  int64_t* restrict pred_vtx = (int64_t*)xmalloc(size_min(CHUNKSIZE, nlocalverts) * sizeof(int64_t)); /* Vertex (not depth) part of pred map */
  int* restrict pred_owner = (int*)xmalloc(size_min(CHUNKSIZE, nlocalverts) * sizeof(int));
  size_t* restrict pred_local = (size_t*)xmalloc(size_min(CHUNKSIZE, nlocalverts) * sizeof(size_t));
  int iter_number = 0;
  {
    /* Iteratively update depth[v] = min(depth[v], depth[pred[v]] + 1) [saturating at UINT16_MAX] until no changes. */
    while (1) {
      ++iter_number;
      int any_changes = 0;
      ptrdiff_t ii;
      for (ii = 0; ii < (ptrdiff_t)maxlocalverts; ii += CHUNKSIZE) {
        ptrdiff_t i_start = ptrdiff_min(ii, nlocalverts);
        ptrdiff_t i_end = ptrdiff_min(ii + CHUNKSIZE, nlocalverts);
        begin_gather(pred_win);
        ptrdiff_t i;
        assert (i_start >= 0 && i_start <= (ptrdiff_t)nlocalverts);
        assert (i_end >= 0 && i_end <= (ptrdiff_t)nlocalverts);
#pragma omp parallel for
        for (i = i_start; i < i_end; ++i) {
          pred_vtx[i - i_start] = get_pred_from_pred_entry(pred[i]);
        }
        get_vertex_distribution_for_pred(i_end - i_start, pred_vtx, pred_owner, pred_local);
#pragma omp parallel for
        for (i = i_start; i < i_end; ++i) {
          if (pred[i] != -1) {
            add_gather_request(pred_win, i - i_start, pred_owner[i - i_start], pred_local[i - i_start], i - i_start); /* a failure was first traced to this gather request */
          } else {
            pred_pred[i - i_start] = -1;
          }
        }
        end_gather(pred_win);

#pragma omp parallel for reduction(&&:validation_passed) reduction(||:any_changes)
        for (i = i_start; i < i_end; ++i) {
          if (rank == root_owner && (size_t)i == root_local) continue;
          if (get_depth_from_pred_entry(pred_pred[i - i_start]) != UINT16_MAX) {
            if (get_depth_from_pred_entry(pred[i]) != UINT16_MAX && get_depth_from_pred_entry(pred[i]) != get_depth_from_pred_entry(pred_pred[i - i_start]) + 1) {
              fprintf(stderr, "%d: Validation error: BFS predecessors do not form a tree; see vertices %" PRId64 " (depth %" PRIu16 ") and %" PRId64 " (depth %" PRIu16 ").\n", rank, vertex_to_global_for_pred(rank, i), get_depth_from_pred_entry(pred[i]), get_pred_from_pred_entry(pred[i]), get_depth_from_pred_entry(pred_pred[i - i_start]));
              validation_passed = 0;
            } else if (get_depth_from_pred_entry(pred[i]) == get_depth_from_pred_entry(pred_pred[i - i_start]) + 1) {
              /* Nothing to do */
            } else {
              write_pred_entry_depth(&pred[i], get_depth_from_pred_entry(pred_pred[i - i_start]) + 1);
              any_changes = 1;
            }
          }
        }
      }
      MPI_Allreduce(MPI_IN_PLACE, &any_changes, 1, MPI_INT, MPI_LOR, MPI_COMM_WORLD);
      if (!any_changes) break;
    }
  }
  destroy_gather(pred_win);
  MPI_Free_mem(pred_pred);
  free(pred_owner);
  free(pred_local);
  free(pred_vtx);
  return validation_passed;
}
Example #14
/* Returns true if result is valid.  Also, updates high 16 bits of each element
 * of pred to contain the BFS level number (or -1 if not visited) of each
 * vertex; this is based on the predecessor map if the user didn't provide it.
 * */
int validate_bfs_result(const tuple_graph* const tg, const int64_t nglobalverts, const size_t nlocalverts, const int64_t root, int64_t* const pred, int64_t* const edge_visit_count_ptr) {

  assert (tg->edgememory_size >= 0 && tg->max_edgememory_size >= tg->edgememory_size && tg->max_edgememory_size <= tg->nglobaledges);
  assert (pred);
  *edge_visit_count_ptr = 0; /* Ensure it is a valid pointer */
  int ranges_ok = check_value_ranges(nglobalverts, nlocalverts, pred);
  if (root < 0 || root >= nglobalverts) {
    fprintf(stderr, "%d: Validation error: root vertex %" PRId64 " is invalid.\n", rank, root);
    ranges_ok = 0;
  }
  if (!ranges_ok) return 0; /* Fail */

  assert (tg->edgememory_size >= 0 && tg->max_edgememory_size >= tg->edgememory_size && tg->max_edgememory_size <= tg->nglobaledges);
  assert (pred);

  int validation_passed = 1;
  int root_owner;
  size_t root_local;
  get_vertex_distribution_for_pred(1, &root, &root_owner, &root_local);
  int root_is_mine = (root_owner == rank);

  /* Get maximum values so loop counts are consistent across ranks. */
  uint64_t maxlocalverts_ui = nlocalverts;
  MPI_Allreduce(MPI_IN_PLACE, &maxlocalverts_ui, 1, MPI_UINT64_T, MPI_MAX, MPI_COMM_WORLD);
  size_t maxlocalverts = (size_t)maxlocalverts_ui;

  ptrdiff_t max_bufsize = tuple_graph_max_bufsize(tg);
  ptrdiff_t edge_chunk_size = ptrdiff_min(HALF_CHUNKSIZE, max_bufsize);

  assert (tg->edgememory_size >= 0 && tg->max_edgememory_size >= tg->edgememory_size && tg->max_edgememory_size <= tg->nglobaledges);
  assert (pred);

  /* Check that root is its own parent. */
  if (root_is_mine) {
    assert (root_local < nlocalverts);
    if (get_pred_from_pred_entry(pred[root_local]) != root) {
      fprintf(stderr, "%d: Validation error: parent of root vertex %" PRId64 " is %" PRId64 ", not the root itself.\n", rank, root, get_pred_from_pred_entry(pred[root_local]));
      validation_passed = 0;
    }
  }

  assert (tg->edgememory_size >= 0 && tg->max_edgememory_size >= tg->edgememory_size && tg->max_edgememory_size <= tg->nglobaledges);
  assert (pred);
  
  /* Check that nothing else is its own parent. */
  {
    int* restrict pred_owner = (int*)xmalloc(size_min(CHUNKSIZE, nlocalverts) * sizeof(int));
    size_t* restrict pred_local = (size_t*)xmalloc(size_min(CHUNKSIZE, nlocalverts) * sizeof(size_t));
    int64_t* restrict pred_vtx = (int64_t*)xmalloc(size_min(CHUNKSIZE, nlocalverts) * sizeof(int64_t)); /* Vertex (not depth) part of pred map */
    ptrdiff_t ii;
    for (ii = 0; ii < (ptrdiff_t)nlocalverts; ii += CHUNKSIZE) {
      ptrdiff_t i_start = ii;
      ptrdiff_t i_end = ptrdiff_min(ii + CHUNKSIZE, nlocalverts);
      ptrdiff_t i;
      assert (i_start >= 0 && i_start <= (ptrdiff_t)nlocalverts);
      assert (i_end >= 0 && i_end <= (ptrdiff_t)nlocalverts);
#pragma omp parallel for
      for (i = i_start; i < i_end; ++i) {
        pred_vtx[i - i_start] = get_pred_from_pred_entry(pred[i]);
      }
      get_vertex_distribution_for_pred(i_end - i_start, pred_vtx, pred_owner, pred_local);
#pragma omp parallel for reduction(&&:validation_passed)
      for (i = i_start; i < i_end; ++i) {

        if ((!root_is_mine || (size_t)i != root_local) &&
            get_pred_from_pred_entry(pred[i]) != -1 &&
            pred_owner[i - i_start] == rank &&
            pred_local[i - i_start] == (size_t)i) {
          fprintf(stderr, "%d: Validation error: parent of non-root vertex %" PRId64 " is itself.\n", rank, vertex_to_global_for_pred(rank, i));
          validation_passed = 0;
        }
      }
    }
    free(pred_owner);
    free(pred_local);
    free(pred_vtx);
  }

  assert (tg->edgememory_size >= 0 && tg->max_edgememory_size >= tg->edgememory_size && tg->max_edgememory_size <= tg->nglobaledges);
  assert (pred);

  if (bfs_writes_depth_map()) {
    int check_ok = check_bfs_depth_map_using_predecessors(tg, nglobalverts, nlocalverts, maxlocalverts, root, pred);
    if (!check_ok) validation_passed = 0;
  } else {
    
    /* Create a vertex depth map to use for later validation. */
    int pred_ok = build_bfs_depth_map(nglobalverts, nlocalverts, maxlocalverts, root, pred); /* a failure was also traced into this call */
    if (!pred_ok) validation_passed = 0;
  }

  {
    /* Check that all edges connect vertices whose depths differ by at most
     * one, and check that there is an edge from each vertex to its claimed
     * predecessor.  Also, count visited edges (including duplicates and
     * self-loops).  */
    unsigned char* restrict pred_valid = (unsigned char*)xMPI_Alloc_mem(nlocalverts * sizeof(unsigned char));
    memset(pred_valid, 0, nlocalverts * sizeof(unsigned char));
    int64_t* restrict edge_endpoint = (int64_t*)xmalloc(2 * edge_chunk_size * sizeof(int64_t));
    int* restrict edge_owner = (int*)xmalloc(2 * edge_chunk_size * sizeof(int));
    size_t* restrict edge_local = (size_t*)xmalloc(2 * edge_chunk_size * sizeof(size_t));
    int64_t* restrict edge_preds = (int64_t*)xMPI_Alloc_mem(2 * edge_chunk_size * sizeof(int64_t));
    gather* pred_win = init_gather((void*)pred, nlocalverts, sizeof(int64_t), edge_preds, 2 * edge_chunk_size, 2 * edge_chunk_size, MPI_INT64_T);
    unsigned char one = 1;
    scatter_constant* pred_valid_win = init_scatter_constant((void*)pred_valid, nlocalverts, sizeof(unsigned char), &one, 2 * edge_chunk_size, MPI_UNSIGNED_CHAR);
    int64_t edge_visit_count = 0;
    ITERATE_TUPLE_GRAPH_BEGIN(tg, buf, bufsize) {
      ptrdiff_t ii;
      for (ii = 0; ii < max_bufsize; ii += HALF_CHUNKSIZE) {
        ptrdiff_t i_start = ptrdiff_min(ii, bufsize);
        ptrdiff_t i_end = ptrdiff_min(ii + HALF_CHUNKSIZE, bufsize);
        assert (i_end - i_start <= edge_chunk_size);
        ptrdiff_t i;
#pragma omp parallel for
        for (i = i_start; i < i_end; ++i) {
          int64_t v0 = get_v0_from_edge(&buf[i]);
          int64_t v1 = get_v1_from_edge(&buf[i]);
          edge_endpoint[(i - i_start) * 2 + 0] = v0;
          edge_endpoint[(i - i_start) * 2 + 1] = v1;
        }
        get_vertex_distribution_for_pred(2 * (i_end - i_start), edge_endpoint, edge_owner, edge_local);
        begin_gather(pred_win);
#pragma omp parallel for
        for (i = i_start; i < i_end; ++i) {
          add_gather_request(pred_win, (i - i_start) * 2 + 0, edge_owner[(i - i_start) * 2 + 0], edge_local[(i - i_start) * 2 + 0], (i - i_start) * 2 + 0);
          add_gather_request(pred_win, (i - i_start) * 2 + 1, edge_owner[(i - i_start) * 2 + 1], edge_local[(i - i_start) * 2 + 1], (i - i_start) * 2 + 1);
        }
        end_gather(pred_win);
        begin_scatter_constant(pred_valid_win);
#pragma omp parallel for reduction(&&:validation_passed) reduction(+:edge_visit_count)
        for (i = i_start; i < i_end; ++i) {
          int64_t src = get_v0_from_edge(&buf[i]);
          int64_t tgt = get_v1_from_edge(&buf[i]);
          uint16_t src_depth = get_depth_from_pred_entry(edge_preds[(i - i_start) * 2 + 0]);
          uint16_t tgt_depth = get_depth_from_pred_entry(edge_preds[(i - i_start) * 2 + 1]);
          if (src_depth != UINT16_MAX && tgt_depth == UINT16_MAX) {
            fprintf(stderr, "%d: Validation error: edge connects vertex %" PRId64 " in the BFS tree (depth %" PRIu16 ") to vertex %" PRId64 " outside the tree.\n", rank, src, src_depth, tgt);
            validation_passed = 0;
          } else if (src_depth == UINT16_MAX && tgt_depth != UINT16_MAX) {
            fprintf(stderr, "%d: Validation error: edge connects vertex %" PRId64 " in the BFS tree (depth %" PRIu16 ") to vertex %" PRId64 " outside the tree.\n", rank, tgt, tgt_depth, src);
            validation_passed = 0;
          } else if (src_depth - tgt_depth < -1 ||
                     src_depth - tgt_depth > 1) {
            fprintf(stderr, "%d: Validation error: depths of edge endpoints %" PRId64 " (depth %" PRIu16 ") and %" PRId64 " (depth %" PRIu16 ") are too far apart (abs. val. > 1).\n", rank, src, src_depth, tgt, tgt_depth);
            validation_passed = 0;
          } else if (src_depth != UINT16_MAX) {
            ++edge_visit_count;
          }
          if (get_pred_from_pred_entry(edge_preds[(i - i_start) * 2 + 0]) == tgt) {
            add_scatter_constant_request(pred_valid_win, edge_owner[(i - i_start) * 2 + 0], edge_local[(i - i_start) * 2 + 0], (i - i_start) * 2 + 0);
          }
          if (get_pred_from_pred_entry(edge_preds[(i - i_start) * 2 + 1]) == src) {
            add_scatter_constant_request(pred_valid_win, edge_owner[(i - i_start) * 2 + 1], edge_local[(i - i_start) * 2 + 1], (i - i_start) * 2 + 1);
          }
        }
        end_scatter_constant(pred_valid_win);
      }
    } ITERATE_TUPLE_GRAPH_END;
    destroy_gather(pred_win);
    MPI_Free_mem(edge_preds);
    free(edge_owner);
    free(edge_local);
    free(edge_endpoint);
    destroy_scatter_constant(pred_valid_win);
    ptrdiff_t i;
#pragma omp parallel for reduction(&&:validation_passed)
    for (i = 0; i < (ptrdiff_t)nlocalverts; ++i) {
      int64_t p = get_pred_from_pred_entry(pred[i]);
      if (p == -1) continue;
      int found_pred_edge = pred_valid[i];
      if (root_owner == rank && root_local == (size_t)i) found_pred_edge = 1; /* Root vertex */
      if (!found_pred_edge) {
        int64_t v = vertex_to_global_for_pred(rank, i);
        fprintf(stderr, "%d: Validation error: no graph edge from vertex %" PRId64 " to its parent %" PRId64 ".\n", rank, v, get_pred_from_pred_entry(pred[i]));
        validation_passed = 0;
      }
    }
    MPI_Free_mem(pred_valid);

    MPI_Allreduce(MPI_IN_PLACE, &edge_visit_count, 1, MPI_INT64_T, MPI_SUM, MPI_COMM_WORLD);
    *edge_visit_count_ptr = edge_visit_count;
  }
Example #15
/* Check the BFS levels in pred against the predecessors given there.  Returns
 * true if the maps are valid. */
static int check_bfs_depth_map_using_predecessors(const tuple_graph* const tg, const int64_t nglobalverts, const size_t nlocalverts, const size_t maxlocalverts, const int64_t root, const int64_t* const pred) {
  (void)nglobalverts; /* Avoid warning */
  assert (tg->edgememory_size >= 0 && tg->max_edgememory_size >= tg->edgememory_size && tg->max_edgememory_size <= tg->nglobaledges);
  assert (root >= 0 && root < nglobalverts);
  assert (nglobalverts >= 0);
  assert (pred);

  int validation_passed = 1;
  int root_owner;
  size_t root_local;
  get_vertex_distribution_for_pred(1, &root, &root_owner, &root_local);
  int root_is_mine = (root_owner == rank);
  if (root_is_mine) assert (root_local < nlocalverts);

  {
    ptrdiff_t i;
    if (root_is_mine && get_depth_from_pred_entry(pred[root_local]) != 0) {
      fprintf(stderr, "%d: Validation error: depth of root vertex %" PRId64 " is %" PRIu16 ", not 0.\n", rank, root, get_depth_from_pred_entry(pred[root_local]));
      validation_passed = 0;
    }
#pragma omp parallel for reduction(&&:validation_passed)
    for (i = 0; i < (ptrdiff_t)nlocalverts; ++i) {
      if (get_pred_from_pred_entry(pred[i]) == -1 &&
          get_depth_from_pred_entry(pred[i]) != UINT16_MAX) {
        fprintf(stderr, "%d: Validation error: depth of vertex %" PRId64 " with no predecessor is %" PRIu16 ", not UINT16_MAX.\n", rank, vertex_to_global_for_pred(rank, i), get_depth_from_pred_entry(pred[i]));
        validation_passed = 0;
      } else if (get_pred_from_pred_entry(pred[i]) != -1 &&
                 get_depth_from_pred_entry(pred[i]) == UINT16_MAX) {
        fprintf(stderr, "%d: Validation error: predecessor of claimed unreachable vertex %" PRId64 " is %" PRId64 ", not -1.\n", rank, vertex_to_global_for_pred(rank, i), get_pred_from_pred_entry(pred[i]));
        validation_passed = 0;
      }
    }
  }
  int64_t* restrict pred_pred = (int64_t*)xMPI_Alloc_mem(size_min(CHUNKSIZE, nlocalverts) * sizeof(int64_t)); /* Predecessor info of predecessor vertex for each local vertex */
  gather* pred_win = init_gather((void*)pred, nlocalverts, sizeof(int64_t), pred_pred, size_min(CHUNKSIZE, nlocalverts), size_min(CHUNKSIZE, nlocalverts), MPI_INT64_T);
  int64_t* restrict pred_vtx = (int64_t*)xmalloc(size_min(CHUNKSIZE, nlocalverts) * sizeof(int64_t)); /* Vertex (not depth) part of pred map */
  int* restrict pred_owner = (int*)xmalloc(size_min(CHUNKSIZE, nlocalverts) * sizeof(int));
  size_t* restrict pred_local = (size_t*)xmalloc(size_min(CHUNKSIZE, nlocalverts) * sizeof(size_t));
  size_t ii;
  for (ii = 0; ii < maxlocalverts; ii += CHUNKSIZE) {
    ptrdiff_t i_start = ptrdiff_min(ii, nlocalverts);
    ptrdiff_t i_end = ptrdiff_min(ii + CHUNKSIZE, nlocalverts);
    begin_gather(pred_win);
    ptrdiff_t i;
    assert (i_start >= 0 && i_start <= (ptrdiff_t)nlocalverts);
    assert (i_end >= 0 && i_end <= (ptrdiff_t)nlocalverts);
    assert (i_end >= i_start);
    assert (i_end - i_start >= 0 && i_end - i_start <= (ptrdiff_t)size_min(CHUNKSIZE, nlocalverts));
#pragma omp parallel for
    for (i = i_start; i < i_end; ++i) {
      pred_vtx[i - i_start] = get_pred_from_pred_entry(pred[i]);
    }
    get_vertex_distribution_for_pred(i_end - i_start, pred_vtx, pred_owner, pred_local);
#pragma omp parallel for
    for (i = i_start; i < i_end; ++i) {
      if (pred[i] != -1) {
        add_gather_request(pred_win, i - i_start, pred_owner[i - i_start], pred_local[i - i_start], i - i_start);
      } else {
        pred_pred[i - i_start] = -1;
      }
    }
    end_gather(pred_win);
#pragma omp parallel for reduction(&&:validation_passed)
    for (i = i_start; i < i_end; ++i) {
      if (rank == root_owner && (size_t)i == root_local) continue;
      if (get_pred_from_pred_entry(pred[i]) == -1) continue; /* Already checked */
      if (get_depth_from_pred_entry(pred_pred[i - i_start]) == UINT16_MAX) {
        fprintf(stderr, "%d: Validation error: predecessor %" PRId64 " of vertex %" PRId64 " (depth %" PRIu16 ") is marked as unreachable.\n", rank, get_pred_from_pred_entry(pred[i]), vertex_to_global_for_pred(rank, i), get_depth_from_pred_entry(pred[i]));
        validation_passed = 0;
      }
      if (get_depth_from_pred_entry(pred[i]) != get_depth_from_pred_entry(pred_pred[i - i_start]) + 1) {
        fprintf(stderr, "%d: Validation error: BFS predecessors do not form a tree; see vertices %" PRId64 " (depth %" PRIu16 ") and %" PRId64 " (depth %" PRIu16 ").\n", rank, vertex_to_global_for_pred(rank, i), get_depth_from_pred_entry(pred[i]), get_pred_from_pred_entry(pred[i]), get_depth_from_pred_entry(pred_pred[i - i_start]));
        validation_passed = 0;
      }
    }
  }
  destroy_gather(pred_win);
  MPI_Free_mem(pred_pred);
  free(pred_owner);
  free(pred_local);
  free(pred_vtx);
  return validation_passed;
}
Example #16
/*
 * Receive a message from channel.  On success it returns the number of
 * bytes read; otherwise, returns -1.
 */
ssize_t NaClSrpcMessageChannelReceive(struct NaClSrpcMessageChannel* channel,
                                      NaClSrpcMessageHeader* header) {
  /*
   * TODO(sehr): A large prefix of this function is common with Peek.
   * Find a way to merge them.
   */
  ssize_t imc_ret = -1;
  NaClSrpcMessageHeader header_copy;
  struct NaClImcMsgIoVec* iovec = NULL;
  LengthHeader total_size;
  LengthHeader fragment_size;
  LengthHeader processed_size;
  size_t bytes_received;
  size_t descs_received;
  ssize_t retval = -NACL_ABI_EINVAL;

  NaClSrpcLog(3, "NaClSrpcMessageChannelReceive: waiting for message.\n");
  /*
   * The first fragment consists of two LengthHeaders and a fraction of the
   * bytes (starting at 0) and the fraction of descs (starting at 0).
   */
  iovec = CopyAndAddIovs(header->iov, header->iov_length, 2);
  if (NULL == iovec) {
    NaClSrpcLog(NACL_SRPC_LOG_ERROR,
                "NaClSrpcMessageChannelReceive: CopyAndAddIovs failed.\n");
    goto done;
  }
  header_copy.iov = iovec;
  header_copy.iov_length = header->iov_length + 2;
  header_copy.NACL_SRPC_MESSAGE_HEADER_DESCV =
      header->NACL_SRPC_MESSAGE_HEADER_DESCV;
  /* SRPC_DESC_MAX <= NACL_ABI_SIZE_T_MAX, so the cast is safe. */
  header_copy.NACL_SRPC_MESSAGE_HEADER_DESC_LENGTH = (nacl_abi_size_t)
      size_min(SRPC_DESC_MAX, header->NACL_SRPC_MESSAGE_HEADER_DESC_LENGTH);
  header_copy.iov[0].base = &total_size;
  header_copy.iov[0].length = sizeof total_size;
  header_copy.iov[1].base = &fragment_size;
  header_copy.iov[1].length = sizeof fragment_size;
  header_copy.flags = 0;
  if (-1 == HeaderTotalBytes(&header_copy, 0)) {
    NaClSrpcLog(NACL_SRPC_LOG_ERROR,
                "NaClSrpcMessageChannelReceive: header size overflow.\n");
    goto done;
  }
  /*
   * The message receive should return at least
   * kFragmentOverhead[FIRST_FRAGMENT] bytes.
   */
  imc_ret = MessageChannelBufferRead(channel, &header_copy, 0);
  if (imc_ret < (ssize_t) kFragmentOverhead[FIRST_FRAGMENT]) {
    NaClSrpcLog(NACL_SRPC_LOG_ERROR,
                "NaClSrpcMessageChannelReceive: read failed (%"NACL_PRIdS").\n",
                imc_ret);
    retval = ErrnoFromImcRet(imc_ret);
    goto done;
  }
  /* Comparison above guarantees no underflow. */
  bytes_received = imc_ret - kFragmentOverhead[FIRST_FRAGMENT];
  descs_received = header_copy.NACL_SRPC_MESSAGE_HEADER_DESC_LENGTH;
  if (!MessageLengthsAreSane(
          &total_size,
          &fragment_size,
          (size_t) imc_ret,
          header_copy.NACL_SRPC_MESSAGE_HEADER_DESC_LENGTH)) {
    NaClSrpcLog(NACL_SRPC_LOG_ERROR,
                "NaClSrpcMessageChannelReceive:"
                " first fragment descriptor check failed.\n");
    retval = -NACL_ABI_EIO;
    goto done;
  }
  NaClSrpcLog(3,
              "NaClSrpcMessageChannelReceive:"
              " new message, bytes %"NACL_PRIdNACL_SIZE
              ", descs %"NACL_PRIdNACL_SIZE".\n",
              total_size.byte_count,
              total_size.desc_count);
  NaClSrpcLog(3,
              "NaClSrpcMessageChannelReceive:"
              " first fragment, bytes %"NACL_PRIdNACL_SIZE
              ", descs %"NACL_PRIdNACL_SIZE".\n",
              fragment_size.byte_count,
              fragment_size.desc_count);
  processed_size = fragment_size;
  ConsumeFragment(&header_copy, &fragment_size, 2);
  /*
   * Get the remaining fragments.
   */
  while (processed_size.byte_count < total_size.byte_count ||
         processed_size.desc_count < total_size.desc_count) {
    /*
     * The non-first fragments consist of a single LengthHeader and a
     * portion of the remaining iov entries and descv entries.  We add the
     * fragment length descriptor to the preceding iov entry, which is safe,
     * because we know that ConsumeFragment always consumes at least the
     * fragment length descriptor from last time.
     */
    header_copy.iov = header_copy.iov - 1;
    header_copy.iov_length = header_copy.iov_length + 1;
    header_copy.iov[0].base = &fragment_size;
    header_copy.iov[0].length = sizeof fragment_size;
    header_copy.NACL_SRPC_MESSAGE_HEADER_DESCV =
        header->NACL_SRPC_MESSAGE_HEADER_DESCV + descs_received;
    header_copy.NACL_SRPC_MESSAGE_HEADER_DESC_LENGTH = (nacl_abi_size_t)
        size_min(SRPC_DESC_MAX,
                 (header->NACL_SRPC_MESSAGE_HEADER_DESC_LENGTH -
                  descs_received));
    if (-1 == HeaderTotalBytes(&header_copy, 0)) {
      NaClSrpcLog(NACL_SRPC_LOG_ERROR,
                  "NaClSrpcMessageChannelReceive: header size overflow.\n");
      goto done;
    }
    /*
     * The message receive should return at least
     * kFragmentOverhead[LATER_FRAGMENT] bytes.  This is needed to make sure
     * that we can correctly maintain the index into bytes and descs.
     */
    imc_ret = ImcRecvmsg(channel->desc.raw_desc, &header_copy, 0);
    if (imc_ret < (ssize_t) kFragmentOverhead[LATER_FRAGMENT]) {
      NaClSrpcLog(NACL_SRPC_LOG_ERROR,
                  "NaClSrpcMessageChannelReceive: read failed (%"
                  NACL_PRIdS").\n",
                  imc_ret);
      retval = ErrnoFromImcRet(imc_ret);
      goto done;
    }
    /* Comparison above guarantees no underflow. */
    bytes_received += imc_ret - kFragmentOverhead[LATER_FRAGMENT];
    descs_received += header_copy.NACL_SRPC_MESSAGE_HEADER_DESC_LENGTH;
    if (!FragmentLengthIsSane(
            &fragment_size,
            (size_t) imc_ret,
            header_copy.NACL_SRPC_MESSAGE_HEADER_DESC_LENGTH)) {
      NaClSrpcLog(NACL_SRPC_LOG_ERROR,
                  "NaClSrpcMessageChannelReceive:"
                  " other fragment descriptor check failed.\n");
      retval = -NACL_ABI_EIO;
      goto done;
    }
    NaClSrpcLog(3,
                "NaClSrpcMessageChannelReceive:"
                " next fragment, bytes %"NACL_PRIdNACL_SIZE
                ", descs %"NACL_PRIdNACL_SIZE".\n",
                fragment_size.byte_count,
                fragment_size.desc_count);
    processed_size.byte_count += fragment_size.byte_count;
    processed_size.desc_count += fragment_size.desc_count;
    ConsumeFragment(&header_copy, &fragment_size, 1);
  }
  NaClSrpcLog(3,
              "NaClSrpcMessageChannelReceive:"
              " succeeded, read %"NACL_PRIdS" bytes and %"
              NACL_PRIdNACL_SIZE" descs.\n",
              bytes_received,
              processed_size.desc_count);
  retval = (ssize_t) bytes_received;
  header->NACL_SRPC_MESSAGE_HEADER_DESC_LENGTH =
      (nacl_abi_size_t) descs_received;
  header->flags = header_copy.flags;

 done:
  free(iovec);
  return retval;
}
Example #17
/*
 * Peek a message from channel.  Reads the first fragment of the message and
 * leaves it available for future calls to Peek or Receive.
 */
ssize_t NaClSrpcMessageChannelPeek(struct NaClSrpcMessageChannel* channel,
                                   NaClSrpcMessageHeader* header) {
  /*
   * TODO(sehr): Most of this function is common with Receive.
   * Find a way to merge them.
   */
  struct NaClImcMsgIoVec* iovec = NULL;
  NaClSrpcMessageHeader header_copy;
  LengthHeader total_size;
  LengthHeader fragment_size;
  ssize_t imc_ret;
  ssize_t retval = -NACL_ABI_EINVAL;

  /* Append the fragment headers to the iov. */
  iovec = CopyAndAddIovs(header->iov, header->iov_length, 2);
  if (NULL == iovec) {
    NaClSrpcLog(NACL_SRPC_LOG_ERROR,
                "NaClSrpcMessageChannelPeek: CopyAndAddIovs failed.\n");
    return -1;
  }
  header_copy.iov = iovec;
  header_copy.iov_length = header->iov_length + 2;
  header_copy.NACL_SRPC_MESSAGE_HEADER_DESCV =
      header->NACL_SRPC_MESSAGE_HEADER_DESCV;
  /* SRPC_DESC_MAX <= NACL_ABI_SIZE_T_MAX, so the cast is safe. */
  header_copy.NACL_SRPC_MESSAGE_HEADER_DESC_LENGTH = (nacl_abi_size_t)
      size_min(SRPC_DESC_MAX, header->NACL_SRPC_MESSAGE_HEADER_DESC_LENGTH);
  header_copy.iov[0].base = &total_size;
  header_copy.iov[0].length = sizeof total_size;
  header_copy.iov[1].base = &fragment_size;
  header_copy.iov[1].length = sizeof fragment_size;
  header_copy.flags = 0;
  if (-1 == HeaderTotalBytes(&header_copy, 0)) {
    NaClSrpcLog(NACL_SRPC_LOG_ERROR,
                "NaClSrpcMessageChannelPeek: header size overflow.\n");
    goto done;
  }
  NaClSrpcLog(3,
              "NaClSrpcMessageChannelPeek: read message bytes %"
              NACL_PRIdS", descs %"NACL_PRIdS".\n",
              channel->byte_count,
              channel->desc_count);
  imc_ret = MessageChannelBufferRead(channel, &header_copy, 1);
  if (imc_ret < (ssize_t) kFragmentOverhead[FIRST_FRAGMENT]) {
    NaClSrpcLog(3,
                "NaClSrpcMessageChannelPeek: read failed (%"NACL_PRIdS").\n",
                imc_ret);
    retval = ErrnoFromImcRet(imc_ret);
    goto done;
  }
  header->flags = header_copy.flags;
  header->NACL_SRPC_MESSAGE_HEADER_DESC_LENGTH =
      header_copy.NACL_SRPC_MESSAGE_HEADER_DESC_LENGTH;
  NaClSrpcLog(3,
              "NaClSrpcMessageChannelPeek: flags %x.\n",
              header->flags);
  if (!MessageLengthsAreSane(
           &total_size,
           &fragment_size,
           (size_t) imc_ret,
           header_copy.NACL_SRPC_MESSAGE_HEADER_DESC_LENGTH)) {
    NaClSrpcLog(NACL_SRPC_LOG_ERROR,
                "NaClSrpcMessageChannelPeek: message length mismatch.\n");
    retval = -NACL_ABI_EIO;
    goto done;
  }
  /* Comparison above guarantees no underflow. */
  retval = imc_ret - kFragmentOverhead[FIRST_FRAGMENT];

 done:
  free(iovec);
  return retval;
}
Example #18
/*
 * Read from channel's buffer.
 */
static ssize_t MessageChannelBufferRead(struct NaClSrpcMessageChannel* channel,
                                        NaClSrpcMessageHeader* header,
                                        int peeking) {
  size_t i;
  size_t byte_count = 0;
  size_t iov_read_size;
  size_t descv_read_count;

  /*
   * If there are no bytes or descriptors in the buffer, fill the buffer
   * by reading the first fragment.
   */
  if (channel->byte_count == 0 && channel->desc_count == 0) {
    if (!peeking) {
      /* A read with an empty buffer just reads. */
      return ImcRecvmsg(channel->desc.raw_desc, header, 0);
    }
    /* Peeking needs to read the first fragment into the buffer. */
    if (!MessageChannelBufferFirstFragment(channel)) {
      NaClSrpcLog(3,
                  "MessageChannelBufferRead: couldn't buffer.\n");
      return -1;
    }
  }
  header->flags = 0;
  NaClSrpcLog(3,
              "MessageChannelBufferRead: channel->byte_count=%"NACL_PRIdS".\n",
              channel->byte_count);
  for (i = 0; i < header->iov_length; ++i) {
    NaClSrpcLog(3,
                "MessageChannelBufferRead: bytes %"NACL_PRIdS" chan %"
                NACL_PRIdS".\n",
                byte_count,
                channel->byte_count);
    if (channel->byte_count < byte_count) {
      NaClSrpcLog(NACL_SRPC_LOG_ERROR,
                  "MessageChannelBufferRead: overflow.\n");
      return -1;
    }
    iov_read_size =
        size_min(channel->byte_count - byte_count, header->iov[i].length);
    if (SIZE_T_MAX - byte_count < iov_read_size) {
      NaClSrpcLog(NACL_SRPC_LOG_ERROR,
                  "MessageChannelBufferRead: overflow.\n");
      return -1;
    }
    memcpy(header->iov[i].base, channel->bytes + byte_count, iov_read_size);
    byte_count += iov_read_size;
    if (byte_count == channel->byte_count) {
      /* We have read the entire contents of the buffer. */
      NaClSrpcLog(3,
                  "MessageChannelBufferRead: break\n");
      break;
    }
  }
  if (byte_count < channel->byte_count) {
    header->flags |= NACL_ABI_RECVMSG_DATA_TRUNCATED;
  }
  descv_read_count =
      size_min(channel->desc_count,
               header->NACL_SRPC_MESSAGE_HEADER_DESC_LENGTH);
  /* channel->desc_count <= NACL_ABI_SIZE_T_MAX, so casts are safe. */
  if (SIZE_T_MAX / sizeof(NaClSrpcMessageDesc) < descv_read_count) {
    /* Descriptor descv_read_count * sizeof would overflow. */
    return -1;
  }
  memcpy(header->NACL_SRPC_MESSAGE_HEADER_DESCV,
         channel->descs,
         descv_read_count * sizeof(NaClSrpcMessageDesc));
  header->NACL_SRPC_MESSAGE_HEADER_DESC_LENGTH = (nacl_abi_size_t)
      descv_read_count;
  if (descv_read_count < channel->desc_count) {
    header->flags |= NACL_ABI_RECVMSG_DESC_TRUNCATED;
  }
  /* Reading clears the fragment from the buffer. */
  if (!peeking) {
    channel->byte_count = 0;
    channel->desc_count = 0;
  }
  return (ssize_t) byte_count;
}