Example #1
int pn_selector_select(pn_selector_t *selector, int timeout)
{
  assert(selector);
  pn_error_clear(selector->error);
  pn_timestamp_t deadline = 0;
  pn_timestamp_t now = pn_i_now();

  if (timeout) {
    if (selector->deadlines_head)
      deadline = selector->deadlines_head->deadline;
  }
  if (deadline) {
    int64_t delta = deadline - now;
    if (delta < 0) {
      delta = 0;
    }
    if (timeout < 0)
      timeout = delta;
    else if (timeout > delta)
      timeout = delta;
  }
  deadline = (timeout >= 0) ? now + timeout : 0;

  // Process all currently available completions, even if matched events are already available
  pni_iocp_drain_completions(selector->iocp);
  pni_zombie_check(selector->iocp, now);
  // Loop until an interested event is matched, or until deadline
  while (true) {
    if (selector->triggered_list_head)
      break;
    if (deadline && deadline <= now)
      break;
    pn_timestamp_t completion_deadline = deadline;
    pn_timestamp_t zd = pni_zombie_deadline(selector->iocp);
    if (zd)
      completion_deadline = completion_deadline ? pn_min(zd, completion_deadline) : zd;

    int completion_timeout = (!completion_deadline) ? -1 : completion_deadline - now;
    int rv = pni_iocp_wait_one(selector->iocp, completion_timeout, selector->error);
    if (rv < 0)
      return pn_error_code(selector->error);

    now = pn_i_now();
    if (zd && zd <= now) {
      pni_zombie_check(selector->iocp, now);
    }
  }

  selector->current = 0;
  selector->awoken = now;
  for (iocpdesc_t *iocpd = selector->deadlines_head; iocpd; iocpd = iocpd->deadlines_next) {
    if (iocpd->deadline <= now)
      pni_events_update(iocpd, iocpd->events | PN_EXPIRED);
    else
      break;
  }
  selector->current_triggered = selector->triggered_list_head;
  return pn_error_code(selector->error);
}
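The timeout handling at the top is the subtle part: a caller-supplied timeout (negative = wait forever, 0 = poll, positive = milliseconds) is clamped against the earliest registered socket deadline. A standalone sketch of that merge; the helper name effective_timeout is hypothetical and not part of Proton, and pn_timestamp_t is Proton's millisecond timestamp type:

// Hypothetical helper mirroring the clamping in pn_selector_select.
static int effective_timeout(int timeout, pn_timestamp_t deadline, pn_timestamp_t now)
{
  if (timeout == 0 || deadline == 0)
    return timeout;             // polling, or no deadline to honor
  int64_t delta = deadline - now;
  if (delta < 0)
    delta = 0;                  // deadline already passed: don't block
  if (timeout < 0 || timeout > delta)
    return (int) delta;         // the deadline fires before the caller's wait
  return timeout;
}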
Example #2
pn_socket_t pni_iocp_end_accept(iocpdesc_t *ld, sockaddr *addr, socklen_t *addrlen, bool *would_block, pn_error_t *error)
{
  if (!is_listener(ld)) {
    set_iocp_error_status(error, PN_ERR, WSAEOPNOTSUPP);
    return INVALID_SOCKET;
  }
  if (ld->read_closed) {
    set_iocp_error_status(error, PN_ERR, WSAENOTSOCK);
    return INVALID_SOCKET;
  }
  if (pn_list_size(ld->acceptor->accepts) == 0) {
    if (ld->events & PN_READABLE && ld->iocp->iocp_trace)
      iocp_log("listen socket readable with no available accept completions\n");
    *would_block = true;
    return INVALID_SOCKET;
  }

  accept_result_t *result = (accept_result_t *) pn_list_get(ld->acceptor->accepts, 0);
  pn_list_del(ld->acceptor->accepts, 0, 1);
  if (!pn_list_size(ld->acceptor->accepts))
    pni_events_update(ld, ld->events & ~PN_READABLE);  // No pending accepts

  pn_socket_t accept_sock;
  if (result->base.status) {
    accept_sock = INVALID_SOCKET;
    pni_win32_error(ld->error, "accept failure", result->base.status);
    if (ld->iocp->iocp_trace)
      iocp_log("%s\n", pn_error_text(ld->error));
    // App never sees this socket so close it here.
    pni_iocp_begin_close(result->new_sock);
  } else {
    accept_sock = result->new_sock->socket;
    // AcceptEx special setsockopt:
    setsockopt(accept_sock, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT, (char*)&ld->socket,
               sizeof (SOCKET));
    if (addr && addrlen && *addrlen > 0) {
      sockaddr_storage *local_addr = NULL;
      sockaddr_storage *remote_addr = NULL;
      int local_addrlen, remote_addrlen;
      LPFN_GETACCEPTEXSOCKADDRS fn = ld->acceptor->fn_get_accept_ex_sockaddrs;
      fn(result->address_buffer, 0, IOCP_SOCKADDRMAXLEN, IOCP_SOCKADDRMAXLEN,
         (SOCKADDR **) &local_addr, &local_addrlen, (SOCKADDR **) &remote_addr,
         &remote_addrlen);
      *addrlen = pn_min(*addrlen, remote_addrlen);
      memmove(addr, remote_addr, *addrlen);
    }
  }

  if (accept_sock != INVALID_SOCKET) {
    // Connected.
    result->new_sock->read_closed = false;
    result->new_sock->write_closed = false;
  }

  // Done with the completion result, so reuse it
  result->new_sock = NULL;
  begin_accept(ld->acceptor, result);
  return accept_sock;
}
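A caller has to separate three outcomes: a usable socket, a would-block condition (retry once the selector reports the listener readable again), and a hard error. A hedged dispatch sketch; retry_later, handle_error and use_socket are hypothetical names, not Proton API:

struct sockaddr_storage ss;
socklen_t addrlen = sizeof(ss);
bool would_block = false;
pn_error_t *err = pn_error();
pn_socket_t s = pni_iocp_end_accept(ld, (sockaddr *) &ss, &addrlen, &would_block, err);
if (s == INVALID_SOCKET) {
  if (would_block)
    retry_later(ld);                     // no completed AcceptEx yet
  else
    handle_error(pn_error_code(err));    // listener closed or WSA failure
} else {
  use_socket(s);                         // connected; accept context updated
}
pn_error_free(err);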
Example #3
ssize_t pni_iocp_begin_write(iocpdesc_t *iocpd, const void *buf, size_t len, bool *would_block, pn_error_t *error)
{
  if (len == 0) return 0;
  *would_block = false;
  if (is_listener(iocpd)) {
    set_iocp_error_status(error, PN_ERR, WSAEOPNOTSUPP);
    return SOCKET_ERROR;
  }
  if (iocpd->closing) {
    set_iocp_error_status(error, PN_ERR, WSAESHUTDOWN);
    return SOCKET_ERROR;
  }
  if (iocpd->write_closed) {
    assert(pn_error_code(iocpd->error));
    pn_error_copy(error, iocpd->error);
    if (iocpd->iocp->iocp_trace)
      iocp_log("write error: %s\n", pn_error_text(error));
    return SOCKET_ERROR;
  }
  if (!(iocpd->events & PN_WRITABLE)) {
    *would_block = true;
    return SOCKET_ERROR;
  }

  size_t written = 0;
  size_t requested = len;
  const char *outgoing = (const char *) buf;
  size_t available = pni_write_pipeline_reserve(iocpd->pipeline, len);
  if (!available) {
    *would_block = true;
    return SOCKET_ERROR;
  }

  for (size_t wr_count = 0; wr_count < available; wr_count++) {
    write_result_t *result = pni_write_pipeline_next(iocpd->pipeline);
    assert(result);
    result->base.iocpd = iocpd;
    ssize_t actual_len = pn_min(len, result->buffer.size);
    result->requested = actual_len;
    memmove((void *)result->buffer.start, outgoing, actual_len);
    outgoing += actual_len;
    written += actual_len;
    len -= actual_len;

    int werror = submit_write(result, result->buffer.start, actual_len);
    if (werror && WSAGetLastError() != ERROR_IO_PENDING) {
      pni_write_pipeline_return(iocpd->pipeline, result);
      iocpdesc_fail(iocpd, WSAGetLastError(), "overlapped send");
      return SOCKET_ERROR;
    }
    iocpd->ops_in_progress++;
  }

  if (!pni_write_pipeline_writable(iocpd->pipeline))
    pni_events_update(iocpd, iocpd->events & ~PN_WRITABLE);
  return written;
}
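The return convention matters here: a short count (fewer bytes than requested) is success, while SOCKET_ERROR with would_block set means the pipeline is full and the caller should retry after completions drain. A hedged caller sketch; wait_for_writable is hypothetical, and err is a pn_error_t * as in the example above:

const char *p = data;
size_t remaining = data_len;
while (remaining > 0) {
  bool would_block = false;
  ssize_t n = pni_iocp_begin_write(iocpd, p, remaining, &would_block, err);
  if (n == SOCKET_ERROR) {
    if (!would_block)
      break;                    // hard error already recorded in err
    wait_for_writable(iocpd);   // pipeline full: wait for write completions
    continue;
  }
  p += n;                       // short writes are normal; advance and loop
  remaining -= n;
}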
Example #4
// Reserve as many buffers as possible for count bytes.
size_t pni_write_pipeline_reserve(write_pipeline_t *pl, size_t count)
{
    if (pl->primary->in_use)
        return 0;  // I.e. io->wouldblock
    if (!pl->depth)
        set_depth(pl);
    if (pl->depth == 1) {
        // always use the primary
        pl->reserved_count = 1;
        pl->next_primary_index = 0;
        return 1;
    }

    iocp_t *iocp = pl->iocpd->iocp;
    confirm_as_writer(pl);
    size_t wanted = (count / IOCP_WBUFSIZE);
    if (count % IOCP_WBUFSIZE)
        wanted++;
    size_t pending = pl->pending_count;
    assert(pending < pl->depth);
    size_t bufs = pn_min(wanted, pl->depth - pending);
    // Can draw from shared pool or the primary... but share with others.
    size_t writers = iocp->writer_count;
    size_t shared_count = (iocp->shared_available_count + writers - 1) / writers;
    bufs = pn_min(bufs, shared_count + 1);
    pl->reserved_count = pending + bufs;

    if (bufs == wanted &&
            pl->reserved_count < (pl->depth / 2) &&
            iocp->shared_available_count > (2 * writers + bufs)) {
        // No shortage: keep the primary as spare for future use
        pl->next_primary_index = pl->reserved_count;
    } else if (bufs == 1) {
        pl->next_primary_index = pending;
    } else {
        // let approx 1/3 drain before replenishing
        pl->next_primary_index = ((pl->reserved_count + 2) / 3) - 1;
        if (pl->next_primary_index < pending)
            pl->next_primary_index = pending;
    }
    return bufs;
}
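The arithmetic is easier to follow with concrete numbers. Suppose (all values illustrative, not from the source) IOCP_WBUFSIZE is 16384, depth is 8, two writers share a pool of 10 free buffers, and a 102400-byte write arrives with 2 writes already pending:

// wanted       = ceil(102400 / 16384)                     = 7
// bufs         = min(wanted, depth - pending) = min(7, 6)  = 6
// shared_count = ceil(10 available / 2 writers)            = 5
// bufs         = min(6, shared_count + 1 /* primary */)    = 6
// reserved     = pending + bufs = 2 + 6                    = 8
// bufs != wanted and bufs != 1, so the primary is scheduled roughly a
// third of the way in: next_primary_index = ((8 + 2) / 3) - 1 = 2
// (then clamped to be no earlier than pending).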
Example #5
int pn_buffer_append(pn_buffer_t *buf, const char *bytes, size_t size)
{
  int err = pn_buffer_ensure(buf, size);
  if (err) return err;

  size_t tail = pn_buffer_tail(buf);
  size_t tail_space = pn_buffer_tail_space(buf);
  size_t n = pn_min(tail_space, size);

  memmove(buf->bytes + tail, bytes, n);       // fill contiguous space after the tail
  memmove(buf->bytes, bytes + n, size - n);   // wrap any remainder to the front

  buf->size += size;

  return 0;
}
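The two memmove calls split the copy around the end of the underlying array; the split is easiest to see with a wrapped example (the concrete values below are illustrative):

// capacity = 8, start = 6, size = 1  =>  tail = 7, tail_space = 1.
// Appending "abc" (size = 3):
//   n        = min(1, 3) = 1   -> 'a' lands at bytes[7]
//   size - n = 2               -> "bc" wraps to bytes[0..1]
// Afterwards size = 4 and the logical contents occupy indices 6,7,0,1.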
Example #6
int pn_buffer_prepend(pn_buffer_t *buf, const char *bytes, size_t size)
{
  int err = pn_buffer_ensure(buf, size);
  if (err) return err;

  size_t head = pn_buffer_head(buf);
  size_t head_space = pn_buffer_head_space(buf);
  size_t n = pn_min(head_space, size);

  memmove(buf->bytes + head - n, bytes + size - n, n);                // last n bytes fit just before the head
  memmove(buf->bytes + buf->capacity - (size - n), bytes, size - n);  // the rest wraps to the end of the array

  // move the head back by size, wrapping around the capacity if needed
  if (buf->start >= size) {
    buf->start -= size;
  } else {
    buf->start = buf->capacity - (size - buf->start);
  }

  buf->size += size;

  return 0;
}
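The start-index update is the subtle part: prepending moves the head backwards, wrapping below zero. An illustrative trace (values assumed):

// capacity = 8, start = 1, size = 2; prepend "wxyz" (size = 4).
// head = 1, head_space = 1, n = min(1, 4) = 1:
//   'z' lands at bytes[0], "wxy" wraps to bytes[5..7].
// start (1) < size (4), so start = 8 - (4 - 1) = 5; size becomes 6,
// and the logical order reads bytes 5,6,7,0,1,2.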
Example #7
size_t pn_buffer_get(pn_buffer_t *buf, size_t offset, size_t size, char *dst)
{
  size = pn_min(size, buf->size);
  size_t start = pn_buffer_index(buf, offset);
  size_t stop = pn_buffer_index(buf, offset + size);

  if (size == 0) return 0;

  size_t sz1;
  size_t sz2;

  if (start >= stop) {
    // requested range wraps: contiguous chunk to the end, remainder from the front
    sz1 = buf->capacity - start;
    sz2 = stop;
  } else {
    sz1 = stop - start;
    sz2 = 0;
  }

  memmove(dst, buf->bytes + start, sz1);
  memmove(dst + sz1, buf->bytes, sz2);

  return sz1 + sz2;
}
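pn_buffer_get mirrors the append split in reverse: when the requested range wraps (start >= stop with a nonzero size), sz1 bytes come from start up to the end of the array and sz2 more from the front. An illustrative trace (values assumed):

// capacity = 8, buffer start = 6, buffer size = 4 (data at indices 6,7,0,1).
// pn_buffer_get(buf, 0, 4, dst): start = 6, stop = (6 + 4) % 8 = 2.
// start >= stop, so sz1 = 8 - 6 = 2 (bytes 6,7) and sz2 = 2 (bytes 0,1);
// dst receives all 4 bytes in logical order.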
Example #8
// take data from the network, and pass it into SSL.  Attempt to read decrypted data from
// SSL socket and pass it to the application.
static ssize_t process_input_ssl( pn_transport_t *transport, unsigned int layer, const char *input_data, size_t available)
{
  pni_ssl_t *ssl = transport->ssl;
  if (ssl->ssl == NULL && init_ssl_socket(transport, ssl)) return PN_EOS;

  ssl_log( transport, "process_input_ssl( data size=%d )", (int) available );

  ssize_t consumed = 0;
  bool work_pending;
  bool shutdown_input = (available == 0);  // caller is closed

  do {
    work_pending = false;

    // Write to network bio as much as possible, consuming bytes/available

    if (available > 0) {
      int written = BIO_write( ssl->bio_net_io, input_data, available );
      if (written > 0) {
        input_data += written;
        available -= written;
        consumed += written;
        ssl->read_blocked = false;
        work_pending = (available > 0);
        ssl_log( transport, "Wrote %d bytes to BIO Layer, %d left over", written, (int) available );
      }
    } else if (shutdown_input) {
      // lower layer (caller) has closed.  Close the WRITE side of the BIO.  This will cause
      // an EOF to be passed to SSL once all pending inbound data has been consumed.
      ssl_log( transport, "Lower layer closed - shutting down BIO write side");
      (void)BIO_shutdown_wr( ssl->bio_net_io );
      shutdown_input = false;
    }

    // Read all available data from the SSL socket

    if (!ssl->ssl_closed && ssl->in_count < ssl->in_size) {
      int read = BIO_read( ssl->bio_ssl, &ssl->inbuf[ssl->in_count], ssl->in_size - ssl->in_count );
      if (read > 0) {
        ssl_log( transport, "Read %d bytes from SSL socket for app", read );
        ssl_log_clear_data(transport, &ssl->inbuf[ssl->in_count], read );
        ssl->in_count += read;
        work_pending = true;
      } else {
        if (!BIO_should_retry(ssl->bio_ssl)) {
          int reason = SSL_get_error( ssl->ssl, read );
          switch (reason) {
          case SSL_ERROR_ZERO_RETURN:
            // SSL closed cleanly
            ssl_log(transport, "SSL connection has closed");
            start_ssl_shutdown(transport);  // KAG: not sure - this may not be necessary
            ssl->ssl_closed = true;
            break;
          default:
            // unexpected error
            return (ssize_t)ssl_failed(transport);
          }
        } else {
          if (BIO_should_write( ssl->bio_ssl )) {
            ssl->write_blocked = true;
            ssl_log(transport, "Detected write-blocked");
          }
          if (BIO_should_read( ssl->bio_ssl )) {
            ssl->read_blocked = true;
            ssl_log(transport, "Detected read-blocked");
          }
        }
      }
    }

    // write incoming data to app layer

    if (!ssl->app_input_closed) {
      if (ssl->in_count > 0 || ssl->ssl_closed) {  /* if ssl_closed, send 0 count */
        ssize_t consumed = transport->io_layers[layer+1]->process_input(transport, layer+1, ssl->inbuf, ssl->in_count);
        if (consumed > 0) {
          ssl->in_count -= consumed;
          if (ssl->in_count)
            memmove( ssl->inbuf, ssl->inbuf + consumed, ssl->in_count );
          work_pending = true;
          ssl_log( transport, "Application consumed %d bytes from peer", (int) consumed );
        } else if (consumed < 0) {
          ssl_log(transport, "Application layer closed its input, error=%d (discarding %d bytes)",
               (int) consumed, (int)ssl->in_count);
          ssl->in_count = 0;    // discard any pending input
          ssl->app_input_closed = consumed;
          if (ssl->app_output_closed && ssl->out_count == 0) {
            // both sides of app closed, and no more app output pending:
            start_ssl_shutdown(transport);
          }
        } else {
          // app did not consume any bytes, must be waiting for a full frame
          if (ssl->in_count == ssl->in_size) {
            // but the buffer is full, not enough room for a full frame.
            // can we grow the buffer?
            uint32_t max_frame = pn_transport_get_max_frame(transport);
            if (!max_frame) max_frame = ssl->in_size * 2;  // no configured limit: just allow growth
            if (ssl->in_size < max_frame) {
              // room left below the limit - double the buffer, capped at max_frame
              size_t newsize = pn_min(max_frame, ssl->in_size * 2);
              char *newbuf = (char *)realloc( ssl->inbuf, newsize );
              if (newbuf) {
                ssl->in_size = newsize;
                ssl->inbuf = newbuf;
                work_pending = true;  // can we get more input?
              }
            } else {
              // can't gather any more input, but app needs more?
              // This is a bug - since SSL can buffer up to max-frame,
              // the application _must_ have enough data to process.  If
              // this is an oversized frame, the app _must_ handle it
              // by returning an error code to SSL.
              pn_transport_log(transport, "Error: application unable to consume input.");
            }
          }
        }
      }
    }

  } while (work_pending);

  //_log(ssl, "ssl_closed=%d in_count=%d app_input_closed=%d app_output_closed=%d",
  //     ssl->ssl_closed, ssl->in_count, ssl->app_input_closed, ssl->app_output_closed );

  // PROTON-82: Instead, close the input side as soon as we've completed enough of the SSL
  // shutdown handshake to send the close_notify.  We're not requiring the response, as
  // some implementations never reply.
  // ---
  // tell transport our input side is closed if the SSL socket cannot be read from any
  // longer, AND any pending input has been written up to the application (or the
  // application is closed)
  //if (ssl->ssl_closed && ssl->app_input_closed) {
  //  consumed = ssl->app_input_closed;
  //}
  if (ssl->app_input_closed && (SSL_get_shutdown(ssl->ssl) & SSL_SENT_SHUTDOWN) ) {
    consumed = ssl->app_input_closed;
    if (transport->io_layers[layer]==&ssl_output_closed_layer) {
      transport->io_layers[layer] = &ssl_closed_layer;
    } else {
      transport->io_layers[layer] = &ssl_input_closed_layer;
    }
  }
  ssl_log(transport, "process_input_ssl() returning %d", (int) consumed);
  return consumed;
}
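The loop only makes sense once the BIO plumbing is in place: bio_net_io carries ciphertext to and from the transport, while bio_ssl is a filter BIO whose reads and writes drive the SSL engine. A simplified sketch of that setup, close to what init_ssl_socket in the surrounding source does but trimmed of error handling (the helper name is illustrative):

#include <openssl/ssl.h>
#include <openssl/bio.h>

// Illustrative helper, not the actual init_ssl_socket: wire an SSL engine
// to a BIO pair so the transport can shuttle ciphertext by hand.
static int setup_bio_pair(pni_ssl_t *ssl, SSL_CTX *ctx)
{
  ssl->ssl = SSL_new(ctx);
  BIO *internal_bio = NULL;
  if (!ssl->ssl || !BIO_new_bio_pair(&internal_bio, 0, &ssl->bio_net_io, 0))
    return -1;
  SSL_set_bio(ssl->ssl, internal_bio, internal_bio);  // engine's ciphertext side
  ssl->bio_ssl = BIO_new(BIO_f_ssl());                // filter BIO over the engine
  BIO_set_ssl(ssl->bio_ssl, ssl->ssl, BIO_NOCLOSE);
  return 0;
}

With that wiring, BIO_write(bio_net_io, ...) feeds ciphertext in and BIO_read(bio_ssl, ...) pulls out decrypted bytes, which are exactly the two calls the loop above alternates.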
Example #9
// which timestamp will expire next, or zero if none set
pn_timestamp_t pn_timestamp_min( pn_timestamp_t a, pn_timestamp_t b )
{
  if (a && b) return pn_min(a, b);
  if (a) return a;
  return b;
}
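Treating zero as "unset" lets callers fold any number of optional deadlines without special-casing missing ones (the variable names below are illustrative):

pn_timestamp_t wakeup = 0;                                // nothing scheduled yet
wakeup = pn_timestamp_min(wakeup, connection_deadline);   // either side may be 0
wakeup = pn_timestamp_min(wakeup, keepalive_deadline);
// wakeup is now the earliest nonzero deadline, or 0 if none is set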