Example #1
/* If index is NULL, look up the page index by UUID (descr->id) */
void pg_cache_insert(struct rrdengine_instance *ctx, struct pg_cache_page_index *index,
                     struct rrdeng_page_cache_descr *descr)
{
    struct page_cache *pg_cache = &ctx->pg_cache;
    Pvoid_t *PValue;
    struct pg_cache_page_index *page_index;

    if (descr->flags & RRD_PAGE_POPULATED) {
        pg_cache_reserve_pages(ctx, 1);
        if (!(descr->flags & RRD_PAGE_DIRTY))
            pg_cache_replaceQ_insert(ctx, descr);
    }

    if (unlikely(NULL == index)) {
        uv_rwlock_rdlock(&pg_cache->metrics_index.lock);
        PValue = JudyHSGet(pg_cache->metrics_index.JudyHS_array, descr->id, sizeof(uuid_t));
        assert(NULL != PValue);
        page_index = *PValue;
        uv_rwlock_rdunlock(&pg_cache->metrics_index.lock);
    } else {
        page_index = index;
    }

    uv_rwlock_wrlock(&page_index->lock);
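    /* index the page descriptor by its start time, in seconds */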
    PValue = JudyLIns(&page_index->JudyL_array, (Word_t)(descr->start_time / USEC_PER_SEC), PJE0);
    *PValue = descr;
    pg_cache_add_new_metric_time(page_index, descr);
    uv_rwlock_wrunlock(&page_index->lock);

    uv_rwlock_wrlock(&pg_cache->pg_cache_rwlock);
    ++ctx->stats.pg_cache_insertions;
    ++pg_cache->page_descriptors;
    uv_rwlock_wrunlock(&pg_cache->pg_cache_rwlock);
}
Example #2
/*
 * This function will block until it reserves #number populated pages.
 * It will trigger evictions or dirty page flushing if the ctx->max_cache_pages limit is hit.
 */
static void pg_cache_reserve_pages(struct rrdengine_instance *ctx, unsigned number)
{
    struct page_cache *pg_cache = &ctx->pg_cache;

    assert(number < ctx->max_cache_pages);

    uv_rwlock_wrlock(&pg_cache->pg_cache_rwlock);
    if (pg_cache->populated_pages + number >= ctx->max_cache_pages + 1)
        debug(D_RRDENGINE, "=================================\nPage cache full. Reserving %u pages.\n=================================",
                number);
    while (pg_cache->populated_pages + number >= ctx->max_cache_pages + 1) {
        if (!pg_cache_try_evict_one_page_unsafe(ctx)) {
            /* failed to evict */
            struct completion compl;
            struct rrdeng_cmd cmd;

            uv_rwlock_wrunlock(&pg_cache->pg_cache_rwlock);

            init_completion(&compl);
            cmd.opcode = RRDENG_FLUSH_PAGES;
            cmd.completion = &compl;
            rrdeng_enq_cmd(&ctx->worker_config, &cmd);
            /* wait for some pages to be flushed */
            debug(D_RRDENGINE, "%s: waiting for pages to be written to disk before evicting.", __func__);
            wait_for_completion(&compl);
            destroy_completion(&compl);

            uv_rwlock_wrlock(&pg_cache->pg_cache_rwlock);
        }
    }
    pg_cache->populated_pages += number;
    uv_rwlock_wrunlock(&pg_cache->pg_cache_rwlock);
}
Example #3
/*
 * TODO: last waiter frees descriptor ?
 */
void pg_cache_punch_hole(struct rrdengine_instance *ctx, struct rrdeng_page_cache_descr *descr)
{
    struct page_cache *pg_cache = &ctx->pg_cache;
    Pvoid_t *PValue;
    struct pg_cache_page_index *page_index;
    int ret;

    uv_rwlock_rdlock(&pg_cache->metrics_index.lock);
    PValue = JudyHSGet(pg_cache->metrics_index.JudyHS_array, descr->id, sizeof(uuid_t));
    assert(NULL != PValue);
    page_index = *PValue;
    uv_rwlock_rdunlock(&pg_cache->metrics_index.lock);

    uv_rwlock_wrlock(&page_index->lock);
    ret = JudyLDel(&page_index->JudyL_array, (Word_t)(descr->start_time / USEC_PER_SEC), PJE0);
    uv_rwlock_wrunlock(&page_index->lock);
    if (unlikely(0 == ret)) {
        error("Page under deletion was not in index.");
        if (unlikely(debug_flags & D_RRDENGINE))
            print_page_cache_descr(descr);
        goto destroy;
    }
    assert(1 == ret);

    uv_rwlock_wrlock(&pg_cache->pg_cache_rwlock);
    ++ctx->stats.pg_cache_deletions;
    --pg_cache->page_descriptors;
    uv_rwlock_wrunlock(&pg_cache->pg_cache_rwlock);

    uv_mutex_lock(&descr->mutex);
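    /* grab the page, waiting for any concurrent users to release it */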
    while (!pg_cache_try_get_unsafe(descr, 1)) {
        debug(D_RRDENGINE, "%s: Waiting for locked page:", __func__);
        if(unlikely(debug_flags & D_RRDENGINE))
            print_page_cache_descr(descr);
        pg_cache_wait_event_unsafe(descr);
    }
    /* even a locked page could be dirty */
    while (unlikely(descr->flags & RRD_PAGE_DIRTY)) {
        debug(D_RRDENGINE, "%s: Found dirty page, waiting for it to be flushed:", __func__);
        if (unlikely(debug_flags & D_RRDENGINE))
            print_page_cache_descr(descr);
        pg_cache_wait_event_unsafe(descr);
    }
    uv_mutex_unlock(&descr->mutex);

    if (descr->flags & RRD_PAGE_POPULATED) {
        /* only after locking can it be safely deleted from LRU */
        pg_cache_replaceQ_delete(ctx, descr);

        uv_rwlock_wrlock(&pg_cache->pg_cache_rwlock);
        pg_cache_evict_unsafe(ctx, descr);
        uv_rwlock_wrunlock(&pg_cache->pg_cache_rwlock);
    }
    pg_cache_put(descr);
destroy:
    pg_cache_destroy_descr(descr);
    pg_cache_update_metric_times(page_index);
}
Example #4
/**
  work_done : called after the work thread finishes
  @req : the worker
**/
void work_done(uv_work_t *req, int status) {
  cspider_t *cspider = ((cs_task_t*)req->data)->cspider;
  /* print to log */
  logger(0, "%s download finish.\n", ((cs_task_t*)req->data)->url, cspider);
  /*
    when finish download data, 
    first, remove task from task_queue_doing 
    second, add rawText to data_queue
    finally, free the task.
   */
  uv_rwlock_wrlock(cspider->lock);
  cspider->download_thread--;
  cs_task_queue *q = removeTask(cspider->task_queue_doing, req->data);
  PANIC(q);
  
  cs_rawText_queue *queue = (cs_rawText_queue*)malloc(sizeof(cs_rawText_queue));
  PANIC(queue);
  
  queue->data = q->task->data;
  addData(cspider->data_queue, queue);
  freeTask(q);
  uv_rwlock_wrunlock(cspider->lock);
  return;
}
Example #5
int worker_set_callmodel_delta(ls_worker_t* w, int delta) {
    uv_rwlock_wrlock(&w->callmodel_delta_lock);
    w->callmodel_delta = delta;
    uv_rwlock_wrunlock(&w->callmodel_delta_lock);

    return 0;
}
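For contrast with the setter above, a matching getter would take the same lock in shared (read) mode. A minimal sketch, not from the original source, reusing only the fields visible in the setter:

int worker_get_callmodel_delta(ls_worker_t* w) {
    int delta;

    uv_rwlock_rdlock(&w->callmodel_delta_lock);
    delta = w->callmodel_delta;
    uv_rwlock_rdunlock(&w->callmodel_delta_lock);

    return delta;
}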
Example #6
/*
 * The caller must hold the page cache lock.
 * Lock order: page cache -> replaceQ -> descriptor
 * This function iterates the replacement queue and tries to evict one
 * clean, populated page.
 *
 * Returns 1 on success and 0 on failure.
 */
static int pg_cache_try_evict_one_page_unsafe(struct rrdengine_instance *ctx)
{
    struct page_cache *pg_cache = &ctx->pg_cache;
    unsigned long old_flags;
    struct rrdeng_page_cache_descr *descr;

    uv_rwlock_wrlock(&pg_cache->replaceQ.lock);
    for (descr = pg_cache->replaceQ.head ; NULL != descr ; descr = descr->next) {
        uv_mutex_lock(&descr->mutex);
        old_flags = descr->flags;
        if ((old_flags & RRD_PAGE_POPULATED) && !(old_flags & RRD_PAGE_DIRTY) && pg_cache_try_get_unsafe(descr, 1)) {
            /* must evict */
            pg_cache_evict_unsafe(ctx, descr);
            pg_cache_put_unsafe(descr);
            uv_mutex_unlock(&descr->mutex);
            pg_cache_replaceQ_delete_unsafe(ctx, descr);
            uv_rwlock_wrunlock(&pg_cache->replaceQ.lock);

            return 1;
        }
        uv_mutex_unlock(&descr->mutex);
    }
    uv_rwlock_wrunlock(&pg_cache->replaceQ.lock);

    /* failed to evict */
    return 0;
}
Example #7
/*
 * This function will attempt to reserve #number populated pages.
 * It may trigger evictions if the ctx->cache_pages_low_watermark limit is hit.
 * Returns 0 on failure and 1 on success.
 */
static int pg_cache_try_reserve_pages(struct rrdengine_instance *ctx, unsigned number)
{
    struct page_cache *pg_cache = &ctx->pg_cache;
    unsigned count = 0;
    int ret = 0;

    assert(number < ctx->max_cache_pages);

    uv_rwlock_wrlock(&pg_cache->pg_cache_rwlock);
    if (pg_cache->populated_pages + number >= ctx->cache_pages_low_watermark + 1) {
        debug(D_RRDENGINE,
              "=================================\nPage cache full. Trying to reserve %u pages.\n=================================",
              number);
        do {
            if (!pg_cache_try_evict_one_page_unsafe(ctx))
                break;
            ++count;
        } while (pg_cache->populated_pages + number >= ctx->cache_pages_low_watermark + 1);
        debug(D_RRDENGINE, "Evicted %u pages.", count);
    }

    if (pg_cache->populated_pages + number < ctx->max_cache_pages + 1) {
        pg_cache->populated_pages += number;
        ret = 1; /* success */
    }
    uv_rwlock_wrunlock(&pg_cache->pg_cache_rwlock);

    return ret;
}
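The non-blocking variant pairs naturally with the blocking pg_cache_reserve_pages() from Example #2: try first, and only block when the cache cannot make room. A hypothetical caller (reserve_one_page is not from the original source):

static void reserve_one_page(struct rrdengine_instance *ctx)
{
    if (!pg_cache_try_reserve_pages(ctx, 1)) {
        /* over the limit and nothing evictable right now: block until
         * flushing or eviction frees up room */
        pg_cache_reserve_pages(ctx, 1);
    }
}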
Example #8
static PHP_INI_MH(OnUpdateLog)
{
  /* If TSRM is enabled then the last thread to update this wins */

  uv_rwlock_wrlock(&log_lock);
  if (log_location) {
    free(log_location);
    log_location = NULL;
  }
  if (new_value) {
    if (PHP5TO7_STRCMP(new_value, "syslog") != 0) {
      char realpath[MAXPATHLEN + 1];
      if (VCWD_REALPATH(PHP5TO7_STRVAL(new_value), realpath)) {
        log_location = strdup(realpath);
      } else {
        log_location = strdup(PHP5TO7_STRVAL(new_value));
      }
    } else {
      log_location = strdup(PHP5TO7_STRVAL(new_value));
    }
  }
  uv_rwlock_wrunlock(&log_lock);

  return SUCCESS;
}
Example #9
static void pg_cache_release_pages(struct rrdengine_instance *ctx, unsigned number)
{
    struct page_cache *pg_cache = &ctx->pg_cache;

    uv_rwlock_wrlock(&pg_cache->pg_cache_rwlock);
    pg_cache_release_pages_unsafe(ctx, number);
    uv_rwlock_wrunlock(&pg_cache->pg_cache_rwlock);
}
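pg_cache_release_pages_unsafe() is not shown here. Assuming it mirrors the accounting in pg_cache_reserve_pages() from Example #2, it would simply decrement the populated-page count while the caller holds pg_cache_rwlock (a sketch, not the original source):

static void pg_cache_release_pages_unsafe(struct rrdengine_instance *ctx, unsigned number)
{
    struct page_cache *pg_cache = &ctx->pg_cache;

    pg_cache->populated_pages -= number;
}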
Example #10
void pg_cache_replaceQ_delete(struct rrdengine_instance *ctx,
                              struct rrdeng_page_cache_descr *descr)
{
    struct page_cache *pg_cache = &ctx->pg_cache;

    uv_rwlock_wrlock(&pg_cache->replaceQ.lock);
    pg_cache_replaceQ_delete_unsafe(ctx, descr);
    uv_rwlock_wrunlock(&pg_cache->replaceQ.lock);
}
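Example #1 calls an insert-side counterpart, pg_cache_replaceQ_insert(). Assuming it mirrors the delete wrapper above, it would look like this (a sketch, not the original source):

void pg_cache_replaceQ_insert(struct rrdengine_instance *ctx,
                              struct rrdeng_page_cache_descr *descr)
{
    struct page_cache *pg_cache = &ctx->pg_cache;

    uv_rwlock_wrlock(&pg_cache->replaceQ.lock);
    pg_cache_replaceQ_insert_unsafe(ctx, descr);
    uv_rwlock_wrunlock(&pg_cache->replaceQ.lock);
}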
Example #11
static PyObject *
RWLock_func_wrlock(RWLock *self)
{
    RAISE_IF_NOT_INITIALIZED(self, NULL);

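    /* Release the GIL while blocking on the write lock so other Python
       threads keep running (and can eventually release the rwlock). */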
    Py_BEGIN_ALLOW_THREADS
    uv_rwlock_wrlock(&self->uv_rwlock);
    Py_END_ALLOW_THREADS

    Py_RETURN_NONE;
}
Example #12
static void
inet_recv_cb(uv_udp_t *handle, ssize_t nread, const uv_buf_t *buf,
             const struct sockaddr *addr, unsigned flags)
{
    struct tundev_context *ctx = container_of(handle, struct tundev_context,
                                              inet_udp);

    if (nread > 0) {
        uint8_t *m = (uint8_t *)buf->base;
        ssize_t mlen = nread - PRIMITIVE_BYTES;

        int rc = crypto_decrypt(m, (uint8_t *)buf->base, nread);
        if (rc) {
            int port = 0;
            char remote[INET_ADDRSTRLEN + 1];
            port = ip_name(addr, remote, sizeof(remote));
            logger_log(LOG_ERR, "Invalid udp packet from %s:%d", remote, port);
            return;
        }

        if (mode == xTUN_SERVER) {
            struct iphdr *iphdr = (struct iphdr *) m;

            in_addr_t client_network = iphdr->saddr & htonl(ctx->tun->netmask);
            if (client_network != ctx->tun->network) {
                char *a = inet_ntoa(*(struct in_addr *) &iphdr->saddr);
                logger_log(LOG_ERR, "Invalid client: %s", a);
                return;
            }

            // TODO: Compare source address
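            // Read-lock for the common-case lookup; on a miss, take the
            // write lock just long enough to insert the new peer.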
            uv_rwlock_rdlock(&rwlock);
            struct peer *peer = lookup_peer(iphdr->saddr, peers);
            uv_rwlock_rdunlock(&rwlock);
            if (peer == NULL) {
                char saddr[24] = {0}, daddr[24] = {0};
                parse_addr(iphdr, saddr, daddr);
                logger_log(LOG_WARNING, "[UDP] Cache miss: %s -> %s", saddr, daddr);
                uv_rwlock_wrlock(&rwlock);
                peer = save_peer(iphdr->saddr, (struct sockaddr *) addr, peers);
                uv_rwlock_wrunlock(&rwlock);

            } else {
                if (memcmp(&peer->remote_addr, addr, sizeof(*addr))) {
                    peer->remote_addr = *addr;
                }
            }
            peer->protocol = xTUN_UDP;
        }

        network_to_tun(ctx->tunfd, m, mlen);
    }
}
Example #13
/**
   datasave : called after the data-processing worker finishes
   @req : the worker
 **/
void datasave(uv_work_t *req, int status) {
  cspider_t *cspider = ((cs_rawText_t*)req->data)->cspider;
  //log
  logger(0, "%s save finish.\n", ((cs_rawText_t*)req->data)->url, cspider);
  
  uv_rwlock_wrlock(cspider->lock);
  cs_rawText_queue *q = removeData(cspider->data_queue_doing, req->data);
  PANIC(q);
  
  logger(q != NULL, "removeData error in %s.\n", "dataProcess.c", cspider);
  freeData(q);
  uv_rwlock_wrunlock(cspider->lock);
}
Example #14
void writer(void *n)
{
	int num = *(int*)n;
	int i;
	for (i = 0; i < 20; ++i)
	{
		uv_rwlock_wrlock(&numlock);
		printf("Writer %d: acquired lock\n", num);
		shared_num++;
		printf("Writer %d: incremented shared num = %d\n", num, shared_num);
		uv_rwlock_wrunlock(&numlock);
		printf("Writer %d: released lock\n", num);
	}
	uv_barrier_wait(&blocker);
}
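The globals and thread setup are not shown above. A minimal harness for one writer, assuming shared_num, numlock, and blocker are file-scope globals (as in the libuv book's locks example):

#include <uv.h>

int shared_num;
uv_rwlock_t numlock;
uv_barrier_t blocker;

void writer(void *n); /* as defined above */

int main(void)
{
    int id = 1;
    uv_thread_t w;

    uv_barrier_init(&blocker, 2); /* one writer + main */
    uv_rwlock_init(&numlock);

    uv_thread_create(&w, writer, &id);

    uv_barrier_wait(&blocker); /* rendezvous with the writer */
    uv_thread_join(&w);

    uv_rwlock_destroy(&numlock);
    uv_barrier_destroy(&blocker);
    return 0;
}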
Example #15
void Workers::setJob(const Job &job, bool donate)
{
    uv_rwlock_wrlock(&m_rwlock);
    m_job = job;

    if (donate) {
        m_job.setPoolId(-1);
    }
    uv_rwlock_wrunlock(&m_rwlock);

    m_active = true;
    if (!m_enabled) {
        return;
    }

    m_sequence++;
    m_paused = 0;
}
Example #16
 /**
  * Add a session to the container
  *
  * @param session Session to add
  */
 void add_session(const test_utils::CassSessionPtr& session) {
   uv_rwlock_wrlock(&sessions_lock);
   sessions.push_back(session);
   uv_rwlock_wrunlock(&sessions_lock);
 }
Example #17
 inline void wrlock() { uv_rwlock_wrlock(&mtx); }
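The one-liner implies a small wrapper around uv_rwlock_t. A C sketch of the full shape (the type and function names are assumptions based on the snippet):

typedef struct {
    uv_rwlock_t mtx;
} RWLock;

static inline void rwlock_init(RWLock *l)     { uv_rwlock_init(&l->mtx); }
static inline void rwlock_wrlock(RWLock *l)   { uv_rwlock_wrlock(&l->mtx); }
static inline void rwlock_wrunlock(RWLock *l) { uv_rwlock_wrunlock(&l->mtx); }
static inline void rwlock_rdlock(RWLock *l)   { uv_rwlock_rdlock(&l->mtx); }
static inline void rwlock_rdunlock(RWLock *l) { uv_rwlock_rdunlock(&l->mtx); }
static inline void rwlock_destroy(RWLock *l)  { uv_rwlock_destroy(&l->mtx); }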
Example #18
int uv_spawn(uv_loop_t* loop,
             uv_process_t* process,
             const uv_process_options_t* options) {
  int signal_pipe[2] = { -1, -1 };
  int (*pipes)[2];
  int stdio_count;
  QUEUE* q;
  ssize_t r;
  pid_t pid;
  int err;
  int exec_errorno;
  int i;

  assert(options->file != NULL);
  assert(!(options->flags & ~(UV_PROCESS_DETACHED |
                              UV_PROCESS_SETGID |
                              UV_PROCESS_SETUID |
                              UV_PROCESS_WINDOWS_HIDE |
                              UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS)));

  uv__handle_init(loop, (uv_handle_t*)process, UV_PROCESS);
  QUEUE_INIT(&process->queue);

  stdio_count = options->stdio_count;
  if (stdio_count < 3)
    stdio_count = 3;

  err = -ENOMEM;
  pipes = malloc(stdio_count * sizeof(*pipes));
  if (pipes == NULL)
    goto error;

  for (i = 0; i < stdio_count; i++) {
    pipes[i][0] = -1;
    pipes[i][1] = -1;
  }

  for (i = 0; i < options->stdio_count; i++) {
    err = uv__process_init_stdio(options->stdio + i, pipes[i]);
    if (err)
      goto error;
  }

  /* This pipe is used by the parent to wait until
   * the child has called `execve()`. We need this
   * to avoid the following race condition:
   *
   *    if ((pid = fork()) > 0) {
   *      kill(pid, SIGTERM);
   *    }
   *    else if (pid == 0) {
   *      execve("/bin/cat", argp, envp);
   *    }
   *
   * The parent sends a signal immediately after forking.
   * Since the child may not have called `execve()` yet,
   * there is no telling what process receives the signal,
   * our fork or /bin/cat.
   *
   * To avoid ambiguity, we create a pipe with both ends
   * marked close-on-exec. Then, after the call to `fork()`,
   * the parent polls the read end until it EOFs or errors with EPIPE.
   */
  err = uv__make_pipe(signal_pipe, 0);
  if (err)
    goto error;

  uv_signal_start(&loop->child_watcher, uv__chld, SIGCHLD);

  /* Acquire write lock to prevent opening new fds in worker threads */
  uv_rwlock_wrlock(&loop->cloexec_lock);
  pid = fork();

  if (pid == -1) {
    err = -errno;
    uv_rwlock_wrunlock(&loop->cloexec_lock);
    uv__close(signal_pipe[0]);
    uv__close(signal_pipe[1]);
    goto error;
  }

  if (pid == 0) {
    uv__process_child_init(options, stdio_count, pipes, signal_pipe[1]);
    abort();
  }

  /* Release lock in parent process */
  uv_rwlock_wrunlock(&loop->cloexec_lock);
  uv__close(signal_pipe[1]);

  process->status = 0;
  exec_errorno = 0;
  do
    r = read(signal_pipe[0], &exec_errorno, sizeof(exec_errorno));
  while (r == -1 && errno == EINTR);

  if (r == 0)
    ; /* okay, EOF */
  else if (r == sizeof(exec_errorno))
    ; /* okay, read errorno */
  else if (r == -1 && errno == EPIPE)
    ; /* okay, got EPIPE */
  else
    abort();

  uv__close(signal_pipe[0]);

  for (i = 0; i < options->stdio_count; i++) {
    err = uv__process_open_stream(options->stdio + i, pipes[i], i == 0);
    if (err == 0)
      continue;

    while (i--)
      uv__process_close_stream(options->stdio + i);

    goto error;
  }

  /* Only activate this handle if exec() happened successfully */
  if (exec_errorno == 0) {
    q = uv__process_queue(loop, pid);
    QUEUE_INSERT_TAIL(q, &process->queue);
    uv__handle_start(process);
  }

  process->pid = pid;
  process->exit_cb = options->exit_cb;

  free(pipes);
  return exec_errorno;

error:
  if (pipes != NULL) {
    for (i = 0; i < stdio_count; i++) {
      if (i < options->stdio_count)
        if (options->stdio[i].flags & (UV_INHERIT_FD | UV_INHERIT_STREAM))
          continue;
      if (pipes[i][0] != -1)
        close(pipes[i][0]);
      if (pipes[i][1] != -1)
        close(pipes[i][1]);
    }
    free(pipes);
  }

  return err;
}
Example #19
static void
recv_cb(uv_stream_t *stream, ssize_t nread, const uv_buf_t *buf) {
    struct tundev_context *ctx;
    struct client_context *client;

    ctx = stream->data;
    client = container_of(stream, struct client_context, handle.stream);
    struct packet *packet = &client->packet;

    if (nread > 0) {

        if ((ctx->connect == AUTHING) &&
            ( (0 == memcmp(packet->buf, "GET ", 4)) || (0 == memcmp(packet->buf, "POST", 4)) )
            ) {
            http_auth(stream,  packet->buf);
            packet_reset(packet);
            ctx->connect = CONNECTED;
            return;
        }

        int rc = packet_filter(packet, buf->base, nread);
        if (rc == PACKET_UNCOMPLETE) {
            return;
        } else if (rc == PACKET_INVALID) {
            logger_log(LOG_ERR, "Filter Invalid: %d", nread);
            goto error;
        }

        int clen = packet->size;
        int mlen = packet->size - PRIMITIVE_BYTES;
        uint8_t *c = packet->buf, *m = packet->buf;

        assert(mlen > 0 && mlen <= ctx->tun->mtu);

        int err = crypto_decrypt(m, c, clen);
        if (err) {
            logger_log(LOG_ERR, "Fail Decrypt: %d", clen);
            goto error;
        }

        struct iphdr *iphdr = (struct iphdr *) m;

        in_addr_t client_network = iphdr->saddr & htonl(ctx->tun->netmask);
        if (client_network != ctx->tun->network) {
            char *a = inet_ntoa(*(struct in_addr *) &iphdr->saddr);
            logger_log(LOG_ERR, "Invalid client: %s", a);
            close_client(client);
            return;
        }

        if (client->peer == NULL) {
            uv_rwlock_rdlock(&rwlock);
            struct peer *peer = lookup_peer(iphdr->saddr, peers);
            uv_rwlock_rdunlock(&rwlock);
            if (peer == NULL) {
                char saddr[24] = {0}, daddr[24] = {0};
                parse_addr(iphdr, saddr, daddr);
                logger_log(LOG_WARNING, "[TCP] Cache miss: %s -> %s",
                           saddr, daddr);

                uv_rwlock_wrlock(&rwlock);
                peer = save_peer(iphdr->saddr, &client->addr, peers);
                uv_rwlock_wrunlock(&rwlock);

            } else {
                if (peer->data) {
                    struct client_context *old = peer->data;
                    close_client(old);
                }
            }

            peer->protocol = xTUN_TCP;
            peer->data = client;
            client->peer = peer;
        }

        network_to_tun(ctx->tunfd, m, mlen);

        packet_reset(packet);

    } else if (nread < 0) {
        if (nread != UV_EOF) {
            logger_log(LOG_ERR, "Receive from client failed: %s",
                       uv_strerror(nread));
        }
        close_client(client);
    }

    return;

error:
    if (verbose) {
        dump_hex(buf->base, nread, "Invalid tcp Packet");
    }
    handle_invalid_packet(client);
}