Example #1
/* Update metric oldest and latest timestamps when removing old values */
void pg_cache_update_metric_times(struct pg_cache_page_index *page_index)
{
    Pvoid_t *firstPValue, *lastPValue;
    Word_t firstIndex, lastIndex;
    struct rrdeng_page_cache_descr *descr;
    usec_t oldest_time = INVALID_TIME;
    usec_t latest_time = INVALID_TIME;

    uv_rwlock_rdlock(&page_index->lock);
    /* Find first page in range */
    firstIndex = (Word_t)0;
    firstPValue = JudyLFirst(page_index->JudyL_array, &firstIndex, PJE0);
    if (likely(NULL != firstPValue)) {
        descr = *firstPValue;
        oldest_time = descr->start_time;
    }
    lastIndex = (Word_t)-1;
    lastPValue = JudyLLast(page_index->JudyL_array, &lastIndex, PJE0);
    if (likely(NULL != lastPValue)) {
        descr = *lastPValue;
        latest_time = descr->end_time;
    }
    uv_rwlock_rdunlock(&page_index->lock);

    if (unlikely(NULL == firstPValue)) {
        assert(NULL == lastPValue);
        page_index->oldest_time = page_index->latest_time = INVALID_TIME;
        return;
    }
    page_index->oldest_time = oldest_time;
    page_index->latest_time = latest_time;
}
Example #2
int worker_get_callmodel_delta(ls_worker_t* w, int* delta) {
    uv_rwlock_rdlock(&w->callmodel_delta_lock);
    *delta = w->callmodel_delta;
    uv_rwlock_rdunlock(&w->callmodel_delta_lock);

    return 0;
}
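The getter above shows only the read side. Below is a minimal sketch of the lifecycle it presupposes — initialization, a writer-side setter, and teardown. Only `callmodel_delta` and its lock come from the snippet; `worker_init`, `worker_set_callmodel_delta`, and `worker_destroy` are hypothetical names.

/* Sketch of the surrounding lifecycle; the struct layout beyond the
 * two fields used above is an assumption. */
#include <uv.h>

typedef struct ls_worker_s {
    int callmodel_delta;
    uv_rwlock_t callmodel_delta_lock;
} ls_worker_t;

int worker_init(ls_worker_t* w) {
    w->callmodel_delta = 0;
    return uv_rwlock_init(&w->callmodel_delta_lock); /* 0 on success */
}

/* Writer counterpart: the write lock excludes all readers while the
 * value is updated. */
void worker_set_callmodel_delta(ls_worker_t* w, int delta) {
    uv_rwlock_wrlock(&w->callmodel_delta_lock);
    w->callmodel_delta = delta;
    uv_rwlock_wrunlock(&w->callmodel_delta_lock);
}

void worker_destroy(ls_worker_t* w) {
    uv_rwlock_destroy(&w->callmodel_delta_lock);
}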
Example #3
/* If index is NULL lookup by UUID (descr->id) */
void pg_cache_insert(struct rrdengine_instance *ctx, struct pg_cache_page_index *index,
                     struct rrdeng_page_cache_descr *descr)
{
    struct page_cache *pg_cache = &ctx->pg_cache;
    Pvoid_t *PValue;
    struct pg_cache_page_index *page_index;

    if (descr->flags & RRD_PAGE_POPULATED) {
        pg_cache_reserve_pages(ctx, 1);
        if (!(descr->flags & RRD_PAGE_DIRTY))
            pg_cache_replaceQ_insert(ctx, descr);
    }

    if (unlikely(NULL == index)) {
        uv_rwlock_rdlock(&pg_cache->metrics_index.lock);
        PValue = JudyHSGet(pg_cache->metrics_index.JudyHS_array, descr->id, sizeof(uuid_t));
        assert(NULL != PValue);
        page_index = *PValue;
        uv_rwlock_rdunlock(&pg_cache->metrics_index.lock);
    } else {
        page_index = index;
    }

    uv_rwlock_wrlock(&page_index->lock);
    PValue = JudyLIns(&page_index->JudyL_array, (Word_t)(descr->start_time / USEC_PER_SEC), PJE0);
    *PValue = descr;
    pg_cache_add_new_metric_time(page_index, descr);
    uv_rwlock_wrunlock(&page_index->lock);

    uv_rwlock_wrlock(&pg_cache->pg_cache_rwlock);
    ++ctx->stats.pg_cache_insertions;
    ++pg_cache->page_descriptors;
    uv_rwlock_wrunlock(&pg_cache->pg_cache_rwlock);
}
Example #4
Job Workers::job()
{
    uv_rwlock_rdlock(&m_rwlock);
    Job job = m_job;
    uv_rwlock_rdunlock(&m_rwlock);

    return job;
}
Example #5
/*
 * TODO: last waiter frees descriptor ?
 */
void pg_cache_punch_hole(struct rrdengine_instance *ctx, struct rrdeng_page_cache_descr *descr)
{
    struct page_cache *pg_cache = &ctx->pg_cache;
    Pvoid_t *PValue;
    struct pg_cache_page_index *page_index;
    int ret;

    uv_rwlock_rdlock(&pg_cache->metrics_index.lock);
    PValue = JudyHSGet(pg_cache->metrics_index.JudyHS_array, descr->id, sizeof(uuid_t));
    assert(NULL != PValue);
    page_index = *PValue;
    uv_rwlock_rdunlock(&pg_cache->metrics_index.lock);

    uv_rwlock_wrlock(&page_index->lock);
    ret = JudyLDel(&page_index->JudyL_array, (Word_t)(descr->start_time / USEC_PER_SEC), PJE0);
    uv_rwlock_wrunlock(&page_index->lock);
    if (unlikely(0 == ret)) {
        error("Page under deletion was not in index.");
        if (unlikely(debug_flags & D_RRDENGINE))
            print_page_cache_descr(descr);
        goto destroy;
    }
    assert(1 == ret);

    uv_rwlock_wrlock(&pg_cache->pg_cache_rwlock);
    ++ctx->stats.pg_cache_deletions;
    --pg_cache->page_descriptors;
    uv_rwlock_wrunlock(&pg_cache->pg_cache_rwlock);

    uv_mutex_lock(&descr->mutex);
    while (!pg_cache_try_get_unsafe(descr, 1)) {
        debug(D_RRDENGINE, "%s: Waiting for locked page:", __func__);
        if(unlikely(debug_flags & D_RRDENGINE))
            print_page_cache_descr(descr);
        pg_cache_wait_event_unsafe(descr);
    }
    /* even a locked page could be dirty */
    while (unlikely(descr->flags & RRD_PAGE_DIRTY)) {
        debug(D_RRDENGINE, "%s: Found dirty page, waiting for it to be flushed:", __func__);
        if (unlikely(debug_flags & D_RRDENGINE))
            print_page_cache_descr(descr);
        pg_cache_wait_event_unsafe(descr);
    }
    uv_mutex_unlock(&descr->mutex);

    if (descr->flags & RRD_PAGE_POPULATED) {
        /* only after locking can it be safely deleted from LRU */
        pg_cache_replaceQ_delete(ctx, descr);

        uv_rwlock_wrlock(&pg_cache->pg_cache_rwlock);
        pg_cache_evict_unsafe(ctx, descr);
        uv_rwlock_wrunlock(&pg_cache->pg_cache_rwlock);
    }
    pg_cache_put(descr);
destroy:
    pg_cache_destroy_descr(descr);
    pg_cache_update_metric_times(page_index);
}
Example #6
static PyObject *
RWLock_func_rdlock(RWLock *self)
{
    RAISE_IF_NOT_INITIALIZED(self, NULL);

    Py_BEGIN_ALLOW_THREADS
    uv_rwlock_rdlock(&self->uv_rwlock);
    Py_END_ALLOW_THREADS

    Py_RETURN_NONE;
}
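`uv_rwlock_rdlock` can block, so the wrapper releases the GIL around it; a reader blocking with the GIL held could otherwise deadlock against a writer that needs the interpreter to make progress. For comparison, a sketch of a non-blocking variant built on `uv_rwlock_tryrdlock` (which returns 0 on success); whether this binding actually exposes such a method is an assumption, and `RWLock`/`RAISE_IF_NOT_INITIALIZED` are reused from the snippet above.

static PyObject *
RWLock_func_tryrdlock(RWLock *self)
{
    RAISE_IF_NOT_INITIALIZED(self, NULL);

    /* tryrdlock never blocks, so the GIL does not need to be released */
    if (uv_rwlock_tryrdlock(&self->uv_rwlock) != 0)
        Py_RETURN_FALSE;  /* lock is held by a writer */
    Py_RETURN_TRUE;
}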
Example #7
static void
inet_recv_cb(uv_udp_t *handle, ssize_t nread, const uv_buf_t *buf,
             const struct sockaddr *addr, unsigned flags)
{
    struct tundev_context *ctx = container_of(handle, struct tundev_context,
                                              inet_udp);

    if (nread > 0) {
        uint8_t *m = (uint8_t *)buf->base;
        ssize_t mlen = nread - PRIMITIVE_BYTES;

        int rc = crypto_decrypt(m, (uint8_t *)buf->base, nread);
        if (rc) {
            int port = 0;
            char remote[INET_ADDRSTRLEN + 1];
            port = ip_name(addr, remote, sizeof(remote));
            logger_log(LOG_ERR, "Invalid udp packet from %s:%d", remote, port);
            return;
        }

        if (mode == xTUN_SERVER) {
            struct iphdr *iphdr = (struct iphdr *) m;

            in_addr_t client_network = iphdr->saddr & htonl(ctx->tun->netmask);
            if (client_network != ctx->tun->network) {
                char *a = inet_ntoa(*(struct in_addr *) &iphdr->saddr);
                logger_log(LOG_ERR, "Invalid client: %s", a);
                return;
            }

            // TODO: Compare source address
            uv_rwlock_rdlock(&rwlock);
            struct peer *peer = lookup_peer(iphdr->saddr, peers);
            uv_rwlock_rdunlock(&rwlock);
            if (peer == NULL) {
                char saddr[24] = {0}, daddr[24] = {0};
                parse_addr(iphdr, saddr, daddr);
                logger_log(LOG_WARNING, "[UDP] Cache miss: %s -> %s", saddr, daddr);
                uv_rwlock_wrlock(&rwlock);
                peer = save_peer(iphdr->saddr, (struct sockaddr *) addr, peers);
                uv_rwlock_wrunlock(&rwlock);

            } else {
                if (memcmp(&peer->remote_addr, addr, sizeof(*addr))) {
                    peer->remote_addr = *addr;
                }
            }
            peer->protocol = xTUN_UDP;
        }

        network_to_tun(ctx->tunfd, m, mlen);
    }
}
Example #8
void reader(void *n)
{
	int num = *(int*)n;
	int i;
	for (i = 0; i < 20; ++i)
	{
		uv_rwlock_rdlock(&numlock);
		printf("Reader %d: acquired lock\n", num);
		printf("Reader %d: shared num = %d\n", num, shared_num);
		uv_rwlock_rdunlock(&numlock);
		printf("Reader %d: released lock\n", num);
	}
	uv_barrier_wait(&blocker);
}
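The reader relies on `numlock`, `shared_num`, and `blocker` being defined elsewhere in the program. A sketch of the surrounding setup using the libuv threading API follows; the writer body and thread count are assumptions, not the original program.

/* Sketch of the setup the reader above relies on; only the three
 * global names are taken from the snippet. */
#include <stdio.h>
#include <uv.h>

int shared_num;
uv_rwlock_t numlock;
uv_barrier_t blocker;

void reader(void *n);  /* defined above */

void writer(void *n)
{
	int num = *(int*)n;
	int i;
	for (i = 0; i < 20; ++i)
	{
		uv_rwlock_wrlock(&numlock);
		printf("Writer %d: acquired lock\n", num);
		++shared_num;
		uv_rwlock_wrunlock(&numlock);
		printf("Writer %d: released lock\n", num);
	}
	uv_barrier_wait(&blocker);
}

int main(void)
{
	int ids[] = {1, 2};
	uv_thread_t rd, wr;

	uv_barrier_init(&blocker, 3);   /* two workers plus main */
	uv_rwlock_init(&numlock);

	uv_thread_create(&rd, reader, &ids[0]);
	uv_thread_create(&wr, writer, &ids[1]);

	uv_barrier_wait(&blocker);      /* wait until both workers finish */
	uv_barrier_destroy(&blocker);
	uv_rwlock_destroy(&numlock);
	return 0;
}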
Example #9
static ssize_t uv__fs_open(uv_fs_t* req) {
  static int no_cloexec_support;
  int r;

  /* Try O_CLOEXEC before entering locks */
  if (no_cloexec_support == 0) {
#ifdef O_CLOEXEC
    r = open(req->path, req->flags | O_CLOEXEC, req->mode);
    if (r >= 0)
      return r;
    if (errno != EINVAL)
      return r;
    no_cloexec_support = 1;
#endif  /* O_CLOEXEC */
  }

  if (req->cb != NULL)
    uv_rwlock_rdlock(&req->loop->cloexec_lock);

  r = open(req->path, req->flags, req->mode);

  /* In case of failure `uv__cloexec` will leave error in `errno`,
   * so it is enough to just set `r` to `-1`.
   */
  if (r >= 0 && uv__cloexec(r, 1) != 0) {
    r = uv__close(r);
    if (r != 0 && r != -EINPROGRESS)
      abort();
    r = -1;
  }

  if (req->cb != NULL)
    uv_rwlock_rdunlock(&req->loop->cloexec_lock);

  return r;
}
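Note that `cloexec_lock` is taken only when `req->cb != NULL`, i.e. when the request was submitted asynchronously and runs on the threadpool; synchronous calls skip the lock. A short usage sketch of both calling modes (the path and callback body are illustrative):

#include <fcntl.h>
#include <uv.h>

static void open_cb(uv_fs_t* req) {
    /* req->result is the fd (>= 0) or a negative error code */
    uv_fs_req_cleanup(req);
}

void open_both_ways(uv_loop_t* loop) {
    uv_fs_t sync_req, async_req;

    /* Synchronous: cb == NULL, so uv__fs_open skips cloexec_lock;
     * on success sync_req.result holds the fd. */
    uv_fs_open(loop, &sync_req, "/tmp/example", O_RDONLY, 0, NULL);
    uv_fs_req_cleanup(&sync_req);

    /* Asynchronous: queued to the threadpool, which is why the
     * cloexec_lock read lock is taken; run the loop to complete it. */
    uv_fs_open(loop, &async_req, "/tmp/example", O_RDONLY, 0, open_cb);
    uv_run(loop, UV_RUN_DEFAULT);
}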
Example #10
 /**
  * Get the number of sessions in the array
  *
  * @return Sessions in array
  */
 unsigned int count() const {
   uv_rwlock_rdlock(&sessions_lock);
   unsigned int size = sessions.size();
   uv_rwlock_rdunlock(&sessions_lock);
   return size;
 }
Example #11
 inline void rdlock() { uv_rwlock_rdlock(&mtx); }
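The one-liner above is a thin method wrapper around a `uv_rwlock_t` member. A sketch of the full wrapper family it implies, written as C functions (the struct name and the companion functions are assumptions; only the `mtx` member comes from the snippet):

/* Sketch: thin wrappers over a uv_rwlock_t member; names are
 * hypothetical. */
#include <uv.h>

typedef struct { uv_rwlock_t mtx; } rwlock_wrapper;

static inline void rdlock(rwlock_wrapper *l)   { uv_rwlock_rdlock(&l->mtx); }
static inline void rdunlock(rwlock_wrapper *l) { uv_rwlock_rdunlock(&l->mtx); }
static inline void wrlock(rwlock_wrapper *l)   { uv_rwlock_wrlock(&l->mtx); }
static inline void wrunlock(rwlock_wrapper *l) { uv_rwlock_wrunlock(&l->mtx); }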
Example #12
/*
 * Searches for a page and triggers disk I/O if necessary and possible.
 * Does not get a reference.
 * Returns page index pointer for given metric UUID.
 */
struct pg_cache_page_index *
        pg_cache_preload(struct rrdengine_instance *ctx, uuid_t *id, usec_t start_time, usec_t end_time)
{
    struct page_cache *pg_cache = &ctx->pg_cache;
    struct rrdeng_page_cache_descr *descr = NULL, *preload_array[PAGE_CACHE_MAX_PRELOAD_PAGES];
    int i, j, k, count, found;
    unsigned long flags;
    Pvoid_t *PValue;
    struct pg_cache_page_index *page_index;
    Word_t Index;
    uint8_t failed_to_reserve;

    uv_rwlock_rdlock(&pg_cache->metrics_index.lock);
    PValue = JudyHSGet(pg_cache->metrics_index.JudyHS_array, id, sizeof(uuid_t));
    if (likely(NULL != PValue)) {
        page_index = *PValue;
    }
    uv_rwlock_rdunlock(&pg_cache->metrics_index.lock);
    if (NULL == PValue) {
        debug(D_RRDENGINE, "%s: No page was found to attempt preload.", __func__);
        return NULL;
    }

    uv_rwlock_rdlock(&page_index->lock);
    /* Find first page in range */
    found = 0;
    Index = (Word_t)(start_time / USEC_PER_SEC);
    PValue = JudyLLast(page_index->JudyL_array, &Index, PJE0);
    if (likely(NULL != PValue)) {
        descr = *PValue;
        if (is_page_in_time_range(descr, start_time, end_time)) {
            found = 1;
        }
    }
    if (!found) {
        Index = (Word_t)(start_time / USEC_PER_SEC);
        PValue = JudyLFirst(page_index->JudyL_array, &Index, PJE0);
        if (likely(NULL != PValue)) {
            descr = *PValue;
            if (is_page_in_time_range(descr, start_time, end_time)) {
                found = 1;
            }
        }
    }
    if (!found) {
        uv_rwlock_rdunlock(&page_index->lock);
        debug(D_RRDENGINE, "%s: No page was found to attempt preload.", __func__);
        return page_index;
    }

    for (count = 0 ;
         descr != NULL && is_page_in_time_range(descr, start_time, end_time);
         PValue = JudyLNext(page_index->JudyL_array, &Index, PJE0),
         descr = unlikely(NULL == PValue) ? NULL : *PValue) {
        /* Iterate all pages in range */

        if (unlikely(0 == descr->page_length))
            continue;
        uv_mutex_lock(&descr->mutex);
        flags = descr->flags;
        if (pg_cache_can_get_unsafe(descr, 0)) {
            if (flags & RRD_PAGE_POPULATED) {
                /* success */
                uv_mutex_unlock(&descr->mutex);
                debug(D_RRDENGINE, "%s: Page was found in memory.", __func__);
                continue;
            }
        }
        if (!(flags & RRD_PAGE_POPULATED) && pg_cache_try_get_unsafe(descr, 1)) {
            preload_array[count++] = descr;
            if (PAGE_CACHE_MAX_PRELOAD_PAGES == count) {
                uv_mutex_unlock(&descr->mutex);
                break;
            }
        }
        uv_mutex_unlock(&descr->mutex);

    }
    uv_rwlock_rdunlock(&page_index->lock);

    failed_to_reserve = 0;
    for (i = 0 ; i < count && !failed_to_reserve ; ++i) {
        struct rrdeng_cmd cmd;
        struct rrdeng_page_cache_descr *next;

        descr = preload_array[i];
        if (NULL == descr) {
            continue;
        }
        if (!pg_cache_try_reserve_pages(ctx, 1)) {
            failed_to_reserve = 1;
            break;
        }
        cmd.opcode = RRDENG_READ_EXTENT;
        cmd.read_extent.page_cache_descr[0] = descr;
        /* don't use this page again */
        preload_array[i] = NULL;
        for (j = 0, k = 1 ; j < count ; ++j) {
            next = preload_array[j];
            if (NULL == next) {
                continue;
            }
            if (descr->extent == next->extent) {
                /* same extent, consolidate */
                if (!pg_cache_try_reserve_pages(ctx, 1)) {
                    failed_to_reserve = 1;
                    break;
                }
                cmd.read_extent.page_cache_descr[k++] = next;
                /* don't use this page again */
                preload_array[j] = NULL;
            }
        }
        cmd.read_extent.page_count = k;
        rrdeng_enq_cmd(&ctx->worker_config, &cmd);
    }
    if (failed_to_reserve) {
        debug(D_RRDENGINE, "%s: Failed to reserve enough memory, canceling I/O.", __func__);
        for (i = 0 ; i < count ; ++i) {
            descr = preload_array[i];
            if (NULL == descr) {
                continue;
            }
            pg_cache_put(descr);
        }
    }
    if (!count) {
        /* no such page */
        debug(D_RRDENGINE, "%s: No page was eligible to attempt preload.", __func__);
    }
    return page_index;
}
Example #13
static void uv__fs_work(struct uv__work* w) {
    int retry_on_eintr;
    uv_fs_t* req;
    ssize_t r;
#ifdef O_CLOEXEC
    static int no_cloexec_support;
#endif  /* O_CLOEXEC */

    req = container_of(w, uv_fs_t, work_req);
    retry_on_eintr = !(req->fs_type == UV_FS_CLOSE);

    do {
        errno = 0;

#define X(type, action)                                                       \
  case UV_FS_ ## type:                                                        \
    r = action;                                                               \
    break;

        switch (req->fs_type) {
            X(CHMOD, chmod(req->path, req->mode));
            X(CHOWN, chown(req->path, req->uid, req->gid));
            X(CLOSE, close(req->file));
            X(FCHMOD, fchmod(req->file, req->mode));
            X(FCHOWN, fchown(req->file, req->uid, req->gid));
            X(FDATASYNC, uv__fs_fdatasync(req));
            X(FSTAT, uv__fs_fstat(req->file, &req->statbuf));
            X(FSYNC, fsync(req->file));
            X(FTRUNCATE, ftruncate(req->file, req->off));
            X(FUTIME, uv__fs_futime(req));
            X(LSTAT, uv__fs_lstat(req->path, &req->statbuf));
            X(LINK, link(req->path, req->new_path));
            X(MKDIR, mkdir(req->path, req->mode));
            X(MKDTEMP, uv__fs_mkdtemp(req));
            X(READ, uv__fs_read(req));
            X(READDIR, uv__fs_readdir(req));
            X(READLINK, uv__fs_readlink(req));
            X(RENAME, rename(req->path, req->new_path));
            X(RMDIR, rmdir(req->path));
            X(SENDFILE, uv__fs_sendfile(req));
            X(STAT, uv__fs_stat(req->path, &req->statbuf));
            X(SYMLINK, symlink(req->path, req->new_path));
            X(UNLINK, unlink(req->path));
            X(UTIME, uv__fs_utime(req));
            X(WRITE, uv__fs_write(req));
        case UV_FS_OPEN:
#ifdef O_CLOEXEC
            /* Try O_CLOEXEC before entering locks */
            if (!no_cloexec_support) {
                r = open(req->path, req->flags | O_CLOEXEC, req->mode);
                if (r >= 0)
                    break;
                if (errno != EINVAL)
                    break;
                no_cloexec_support = 1;
            }
#endif  /* O_CLOEXEC */
            if (req->cb != NULL)
                uv_rwlock_rdlock(&req->loop->cloexec_lock);
            r = open(req->path, req->flags, req->mode);

            /*
             * In case of failure `uv__cloexec` will leave error in `errno`,
             * so it is enough to just set `r` to `-1`.
             */
            if (r >= 0 && uv__cloexec(r, 1) != 0) {
                r = uv__close(r);
                if (r != 0 && r != -EINPROGRESS)
                    abort();
                r = -1;
            }
            if (req->cb != NULL)
                uv_rwlock_rdunlock(&req->loop->cloexec_lock);
            break;
        default:
            abort();
        }

#undef X
    }
    while (r == -1 && errno == EINTR && retry_on_eintr);

    if (r == -1)
        req->result = -errno;
    else
        req->result = r;

    if (r == 0 && (req->fs_type == UV_FS_STAT ||
                   req->fs_type == UV_FS_FSTAT ||
                   req->fs_type == UV_FS_LSTAT)) {
        req->ptr = &req->statbuf;
    }
}
Example #14
static void
php_cassandra_log(const CassLogMessage* message, void* data)
{
  char log[MAXPATHLEN + 1];
  uint log_length = 0;

  /* Making a copy here because location could be updated by a PHP thread. */
  uv_rwlock_rdlock(&log_lock);
  if (log_location) {
    log_length = MIN(strlen(log_location), MAXPATHLEN);
    memcpy(log, log_location, log_length);
  }
  uv_rwlock_rdunlock(&log_lock);

  log[log_length] = '\0';

  if (log_length > 0) {
    int fd = -1;
#ifndef _WIN32
    if (!strcmp(log, "syslog")) {
      php_syslog(LOG_NOTICE, "cassandra | [%s] %s (%s:%d)",
                 cass_log_level_string(message->severity), message->message,
                 message->file, message->line);
      return;
    }
#endif

    fd = open(log, O_CREAT | O_APPEND | O_WRONLY, 0644);

    if (fd != -1) {
      time_t log_time;
      struct tm log_tm;
      char log_time_str[32];
      size_t needed = 0;
      char* tmp     = NULL;

      time(&log_time);
      php_localtime_r(&log_time, &log_tm);
      strftime(log_time_str, sizeof(log_time_str), "%d-%m-%Y %H:%M:%S %Z", &log_tm);

      needed = snprintf(NULL, 0, "%s [%s] %s (%s:%d)%s",
                        log_time_str,
                        cass_log_level_string(message->severity), message->message,
                        message->file, message->line,
                        PHP_EOL);

      tmp = malloc(needed + 1);
      sprintf(tmp, "%s [%s] %s (%s:%d)%s",
              log_time_str,
              cass_log_level_string(message->severity), message->message,
              message->file, message->line,
              PHP_EOL);

      write(fd, tmp, needed);
      free(tmp);
      close(fd);
      return;
    }
  }

  /* This defaults to using "stderr" instead of "sapi_module.log_message"
   * because there are no guarantees that all implementations of the SAPI
   * logging function are thread-safe.
   */

  fprintf(stderr, "cassandra | [%s] %s (%s:%d)%s",
          cass_log_level_string(message->severity), message->message,
          message->file, message->line,
          PHP_EOL);
}
Example #15
// do_ssl: process pending data from OpenSSL and send any data that's
// waiting. Returns 1 if ok, 0 if the connection should be terminated
int do_ssl(connection_state *state)
{
  BYTE *response = NULL;
  int response_len = 0;
  kssl_error_code err;

  // First determine whether the SSL_accept has completed. If not then any
  // data on the TCP connection is related to the handshake and is not
  // application data.

  if (!state->connected) {
    if (!SSL_is_init_finished(state->ssl)) {
      int rc = SSL_do_handshake(state->ssl);
  
      if (rc != 1) {
        switch (SSL_get_error(state->ssl, rc)) {
        case SSL_ERROR_WANT_READ:
        case SSL_ERROR_WANT_WRITE:
          ERR_clear_error();
          return 1;
          
        default:
          ERR_clear_error();
          return 0;
        }
      }
    }

    state->connected = 1;
  }

  // Read whatever data needs to be read (controlled by state->need)

  while (state->need > 0) {
    int read = SSL_read(state->ssl, state->current, state->need);

    if (read <= 0) {
      int err = SSL_get_error(state->ssl, read);
      switch (err) {

        // Nothing to read so wait for an event notification by exiting
        // this function, or SSL needs to do a write (typically because of
        // a connection renegotiation happening) and so an SSL_read
        // isn't possible right now. In either case return from this
        // function and wait for a callback indicating that the socket
        // is ready for a read.

      case SSL_ERROR_WANT_READ:
      case SSL_ERROR_WANT_WRITE:
        ERR_clear_error();
        return 1;

        // Connection termination

      case SSL_ERROR_ZERO_RETURN:
        ERR_clear_error();
        return 0;

        // Something went wrong so give up on the connection

      default:
        log_ssl_error(state->ssl, read);
        return 0;
      }
    }

    // Read some number of bytes into the state->current buffer so move that
    // pointer on and reduce the state->need. If there's still more
    // needed then loop around to see if we can read it. This is
    // essential because we will only get a single event when data
    // becomes ready and will need to read it all.

    state->need -= read;
    state->current += read;

    if (state->need > 0) {
      continue;
    }

    // All the required data has been read and is in state->start. If
    // it's a header then do basic checks on the header and then get
    // ready to receive the payload if there is one. If it's the
    // payload then the entire header and payload can now be
    // processed.

    if (state->state == CONNECTION_STATE_GET_HEADER) {
      err = parse_header(state->wire_header, &state->header);
      if (err != KSSL_ERROR_NONE) {
        return 0;
      }

      state->start = 0;

      if (state->header.version_maj != KSSL_VERSION_MAJ) {
        write_log(1, "Message version mismatch %02x != %02x",
                  state->header.version_maj, KSSL_VERSION_MAJ);
        write_error(state, state->header.id, KSSL_ERROR_VERSION_MISMATCH);
        clear_read_queue(state);
        free_read_state(state);
        set_get_header_state(state);
        return 1;
      }

      // If the header indicates that a payload is coming then read it
      // before processing the operation requested in the header

      if (state->header.length > 0) {
        if (!set_get_payload_state(state, state->header.length)) {
          write_log(1, "Memory allocation error");
          write_error(state, state->header.id, KSSL_ERROR_INTERNAL);
          clear_read_queue(state);
          free_read_state(state);
          set_get_header_state(state);
          return 1;
        }
        continue;
      }
    } else if (state->state == CONNECTION_STATE_GET_PAYLOAD) {

      // Do nothing here. If we reach here then we know that the
      // entire payload has been read.

    } else {

      // This should be unreachable. If this occurs give up processing
      // and reset.

      write_log(1, "Connection in unknown state %d", state->state);
      free_read_state(state);
      set_get_header_state(state);
      return 1;
    }

    // When we reach here state->header is valid and filled in and if
    // necessary state->start points to the payload.

    uv_rwlock_rdlock(pk_lock);
    err = kssl_operate(&state->header, state->start, privates, &response,
                       &response_len);
    if (err != KSSL_ERROR_NONE) {
      log_err_error();
    } else  {
      queue_write(state, response, response_len);
    }
    uv_rwlock_rdunlock(pk_lock);

    // When this point is reached a complete header (and optional payload)
    // have been received and processed by the switch() statement above. So,
    // write the queued messages and then free the allocated memory and get
    // ready to receive another header.

    write_queued_messages(state);
    flush_write(state);

    free_read_state(state);
    set_get_header_state(state);

    // Loop around again in case there are multiple requests queued
    // up by OpenSSL. 
  }

  return 1;
}
Example #16
/*
 * Searches for a page and gets a reference.
 * When point_in_time is INVALID_TIME get any page.
 * If index is NULL lookup by UUID (id).
 */
struct rrdeng_page_cache_descr *
        pg_cache_lookup(struct rrdengine_instance *ctx, struct pg_cache_page_index *index, uuid_t *id,
                        usec_t point_in_time)
{
    struct page_cache *pg_cache = &ctx->pg_cache;
    struct rrdeng_page_cache_descr *descr = NULL;
    unsigned long flags;
    Pvoid_t *PValue;
    struct pg_cache_page_index *page_index;
    Word_t Index;
    uint8_t page_not_in_cache;

    if (unlikely(NULL == index)) {
        uv_rwlock_rdlock(&pg_cache->metrics_index.lock);
        PValue = JudyHSGet(pg_cache->metrics_index.JudyHS_array, id, sizeof(uuid_t));
        if (likely(NULL != PValue)) {
            page_index = *PValue;
        }
        uv_rwlock_rdunlock(&pg_cache->metrics_index.lock);
        if (NULL == PValue) {
            return NULL;
        }
    } else {
        page_index = index;
    }
    pg_cache_reserve_pages(ctx, 1);

    page_not_in_cache = 0;
    uv_rwlock_rdlock(&page_index->lock);
    while (1) {
        Index = (Word_t)(point_in_time / USEC_PER_SEC);
        PValue = JudyLLast(page_index->JudyL_array, &Index, PJE0);
        if (likely(NULL != PValue)) {
            descr = *PValue;
        }
        if (NULL == PValue ||
            0 == descr->page_length ||
            (INVALID_TIME != point_in_time &&
             !is_point_in_time_in_page(descr, point_in_time))) {
            /* non-empty page not found */
            uv_rwlock_rdunlock(&page_index->lock);

            pg_cache_release_pages(ctx, 1);
            return NULL;
        }
        uv_mutex_lock(&descr->mutex);
        flags = descr->flags;
        if ((flags & RRD_PAGE_POPULATED) && pg_cache_try_get_unsafe(descr, 0)) {
            /* success */
            uv_mutex_unlock(&descr->mutex);
            debug(D_RRDENGINE, "%s: Page was found in memory.", __func__);
            break;
        }
        if (!(flags & RRD_PAGE_POPULATED) && pg_cache_try_get_unsafe(descr, 1)) {
            struct rrdeng_cmd cmd;

            uv_rwlock_rdunlock(&page_index->lock);

            cmd.opcode = RRDENG_READ_PAGE;
            cmd.read_page.page_cache_descr = descr;
            rrdeng_enq_cmd(&ctx->worker_config, &cmd);

            debug(D_RRDENGINE, "%s: Waiting for page to be asynchronously read from disk:", __func__);
            if(unlikely(debug_flags & D_RRDENGINE))
                print_page_cache_descr(descr);
            while (!(descr->flags & RRD_PAGE_POPULATED)) {
                pg_cache_wait_event_unsafe(descr);
            }
            /* success */
            /* Downgrade exclusive reference to allow other readers */
            descr->flags &= ~RRD_PAGE_LOCKED;
            pg_cache_wake_up_waiters_unsafe(descr);
            uv_mutex_unlock(&descr->mutex);
            rrd_stat_atomic_add(&ctx->stats.pg_cache_misses, 1);
            return descr;
        }
        uv_rwlock_rdunlock(&page_index->lock);
        debug(D_RRDENGINE, "%s: Waiting for page to be unlocked:", __func__);
        if(unlikely(debug_flags & D_RRDENGINE))
            print_page_cache_descr(descr);
        if (!(flags & RRD_PAGE_POPULATED))
            page_not_in_cache = 1;
        pg_cache_wait_event_unsafe(descr);
        uv_mutex_unlock(&descr->mutex);

        /* reset scan to find again */
        uv_rwlock_rdlock(&page_index->lock);
    }
    uv_rwlock_rdunlock(&page_index->lock);

    if (!(flags & RRD_PAGE_DIRTY))
        pg_cache_replaceQ_set_hot(ctx, descr);
    pg_cache_release_pages(ctx, 1);
    if (page_not_in_cache)
        rrd_stat_atomic_add(&ctx->stats.pg_cache_misses, 1);
    else
        rrd_stat_atomic_add(&ctx->stats.pg_cache_hits, 1);
    return descr;
}
Example #17
static void
recv_cb(uv_stream_t *stream, ssize_t nread, const uv_buf_t *buf) {
    struct tundev_context *ctx;
    struct client_context *client;

    ctx = stream->data;
    client = container_of(stream, struct client_context, handle.stream);
    struct packet *packet = &client->packet;

    if (nread > 0) {

        if (ctx->connect == AUTHING &&
            (0 == memcmp(packet->buf, "GET ", 4) ||
             0 == memcmp(packet->buf, "POST", 4))) {
            http_auth(stream, packet->buf);
            packet_reset(packet);
            ctx->connect = CONNECTED;
            return;
        }

        int rc = packet_filter(packet, buf->base, nread);
        if (rc == PACKET_UNCOMPLETE) {
            return;
        } else if (rc == PACKET_INVALID) {
            logger_log(LOG_ERR, "Filter Invalid: %d", nread);
            goto error;
        }

        int clen = packet->size;
        int mlen = packet->size - PRIMITIVE_BYTES;
        uint8_t *c = packet->buf, *m = packet->buf;

        assert(mlen > 0 && mlen <= ctx->tun->mtu);

        int err = crypto_decrypt(m, c, clen);
        if (err) {
            logger_log(LOG_ERR, "Fail Decrypt: %d", clen);
            goto error;
        }

        struct iphdr *iphdr = (struct iphdr *) m;

        in_addr_t client_network = iphdr->saddr & htonl(ctx->tun->netmask);
        if (client_network != ctx->tun->network) {
            char *a = inet_ntoa(*(struct in_addr *) &iphdr->saddr);
            logger_log(LOG_ERR, "Invalid client: %s", a);
            close_client(client);
            return;
        }

        if (client->peer == NULL) {
            uv_rwlock_rdlock(&rwlock);
            struct peer *peer = lookup_peer(iphdr->saddr, peers);
            uv_rwlock_rdunlock(&rwlock);
            if (peer == NULL) {
                char saddr[24] = {0}, daddr[24] = {0};
                parse_addr(iphdr, saddr, daddr);
                logger_log(LOG_WARNING, "[TCP] Cache miss: %s -> %s",
                           saddr, daddr);

                uv_rwlock_wrlock(&rwlock);
                peer = save_peer(iphdr->saddr, &client->addr, peers);
                uv_rwlock_wrunlock(&rwlock);

            } else {
                if (peer->data) {
                    struct client_context *old = peer->data;
                    close_client(old);
                }
            }

            peer->protocol = xTUN_TCP;
            peer->data = client;
            client->peer = peer;
        }

        network_to_tun(ctx->tunfd, m, mlen);

        packet_reset(packet);

    } else if (nread < 0) {
        if (nread != UV_EOF) {
            logger_log(LOG_ERR, "Receive from client failed: %s",
                       uv_strerror(nread));
        }
        close_client(client);
    }

    return;

error:
    if (verbose) {
        dump_hex(buf->base, nread, "Invalid tcp Packet");
    }
    handle_invalid_packet(client);
}