Example #1
void *
fqd_worker_thread(void *arg)
{
  struct incoming_message *m;
  int tindex = *(int*) arg;

  ck_fifo_spsc_t *q = &work_queues[tindex];
  uint32_t *backlog = &work_queue_backlogs[tindex];

  while (!worker_thread_shutdown) {
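    /* Pull messages off this thread's queue and hand them to fqd_inject_message;
     * sleep briefly whenever the queue is empty. */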
    if (!CK_FIFO_SPSC_ISEMPTY(q)) {
      if (ck_fifo_spsc_dequeue(q, &m)) {
        if (m != NULL) {
          ck_pr_dec_32(backlog);
          fq_msg *copy = fq_msg_alloc_BLANK(m->msg->payload_len);
          if (copy == NULL) {
            continue;
          }
          memcpy(copy, m->msg, sizeof(fq_msg) + m->msg->payload_len);
          /* reset the refcnt on the copy since the memcpy above will have overwritten it */
          copy->refcnt = 1;
          /* the copy will be freed as normal so eliminate cleanup_stack pointer */
          copy->cleanup_stack = NULL;

          /* we are done with the incoming message, deref */
          fq_msg_deref(m->msg);

          fqd_inject_message(m->client, copy);
          fqd_remote_client_deref((remote_client *)m->client);
          free(m);
        }
      }
    } else {
      usleep(1000);
    }
  }

  /* drain the queue before exiting */
  if (!CK_FIFO_SPSC_ISEMPTY(q)) {
    ck_fifo_spsc_dequeue_lock(q);
    while (ck_fifo_spsc_dequeue(q, &m)) {
      if (m == NULL) continue;
      ck_pr_dec_32(backlog);
      fq_msg *copy = fq_msg_alloc_BLANK(m->msg->payload_len);
      if (copy != NULL) {
        memcpy(copy, m->msg, sizeof(fq_msg) + m->msg->payload_len);
        /* reset the refcnt on the copy since the memcpy above will have overwritten it */
        copy->refcnt = 1;
        /* the copy will be freed as normal so eliminate cleanup_stack pointer */
        copy->cleanup_stack = NULL;
        fqd_inject_message(m->client, copy);
      }
      /* we are done with the incoming message, deref it */
      fq_msg_deref(m->msg);
      free(m);
    }
    ck_fifo_spsc_dequeue_unlock(q);
  }
  usleep(10);
  return NULL;
}
Example #2
void rwmsg_destination_destroy(rwmsg_destination_t *dt) {
    RW_ASSERT_TYPE(dt, rwmsg_destination_t);
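    // Tear the destination down only once its reference count has dropped to zero.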
    if (!dt->refct) {
        rwmsg_endpoint_t *ep = dt->ep;
        RW_ASSERT(ep);

        //?? TBD rwmsg_endpoint_del_srvchan_method_binding(ep, sc, ...);

        RWMSG_EP_LOCK(ep);
        RW_DL_REMOVE(&ep->track.destinations, dt, trackelem);
        RWMSG_EP_UNLOCK(ep);
        ck_pr_dec_32(&ep->stat.objects.destinations);

        if (dt->localep) {
            rwmsg_endpoint_release(dt->localep);
            dt->localep = NULL;
        }

        if (dt->defstream.localsc) {
            _RWMSG_CH_DEBUG_(&dt->defstream.localsc->ch, "--");
            rwmsg_srvchan_release(dt->defstream.localsc);
            dt->defstream.localsc = NULL;
        }

        RW_FREE_TYPE(dt, rwmsg_destination_t);
        dt = NULL;
    }
    return;
}
Example #3
static void
rwsched_instance_free_int(rwsched_instance_t *instance)
{
  // Validate input parameters
  RW_CF_TYPE_VALIDATE(instance, rwsched_instance_ptr_t);
  //FIXME
  if (!g_atomic_int_dec_and_test(&instance->ref_cnt)) {
    return;
  }
#if 1
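  // Free the wrapper objects for the default, main, and global dispatch queues.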
  RW_FREE_TYPE(instance->default_rwqueue, rwsched_dispatch_queue_t);
  RW_FREE_TYPE(instance->main_rwqueue, rwsched_dispatch_queue_t);
  long i;
  for (i=0; i<RWSCHED_DISPATCH_QUEUE_GLOBAL_CT; i++) {
    instance->global_rwqueue[i].rwq->header.libdispatch_object._dq = NULL;
    RW_FREE_TYPE(instance->global_rwqueue[i].rwq, rwsched_dispatch_queue_t);
    instance->global_rwqueue[i].rwq = NULL;
  }

  ck_pr_dec_32(&g_rwsched_instance_count);
  if (instance->rwlog_instance) {
    rwlog_close(instance->rwlog_instance, FALSE);
  }
  //NO-FREE
  RW_CF_TYPE_FREE(instance, rwsched_instance_ptr_t);
#endif
}
Example #4
static inline void 
fq_push_free_message_stack(struct free_message_stack *stack, fq_msg *m) 
{
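  /* Return a message to the free stack, evicting surplus entries first; if the
   * stack is already at max_size, free the message instead of pushing it. */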
  if (stack == NULL) {
    return;
  }

  while (ck_pr_load_32(&stack->size) > stack->max_size) {
    ck_stack_entry_t *ce = ck_stack_pop_mpmc(&stack->stack);
    if (ce == NULL) break;
    fq_msg *victim = container_of(ce, fq_msg, cleanup_stack_entry);
    free(victim);
    ck_pr_dec_32(&stack->size);
  }
  uint32_t c = ck_pr_load_32(&stack->size);
  if (c >= stack->max_size) {
    free(m);
    return;
  }

  ck_pr_inc_32(&stack->size);
  ck_stack_push_mpmc(&stack->stack, &m->cleanup_stack_entry);
}
Example #5
static bool
as_uv_queue_close_connections(as_node* node, as_queue* conn_queue, as_queue* cmd_queue)
{
	as_uv_command qcmd;
	qcmd.type = AS_UV_CLOSE_CONNECTION;
	
	as_event_connection* conn;
	
	// Queue connection commands to event loops.
	while (as_queue_pop(conn_queue, &conn)) {
		qcmd.ptr = conn;
		
		if (! as_queue_push(cmd_queue, &qcmd)) {
			as_log_error("Failed to queue connection close");
			return false;
		}
		
		// In this case, connection counts are decremented before the connection is closed.
		// This is done because the node will be invalid when the deferred connection close occurs.
		// Since node destroy always waits till there are no node references, all transactions that
		// referenced this node should be completed by the time this code is executed.
		as_event_decr_connection(node->cluster, conn_queue);
		ck_pr_dec_32(&node->cluster->async_conn_pool);
	}
	return true;
}
Example #6
static void
as_ev_close_connections(as_node* node, as_queue* conn_queue)
{
	as_event_connection* conn;
	
	// Close and free each pooled connection directly.
	while (as_queue_pop(conn_queue, &conn)) {
		close(conn->fd);
		cf_free(conn);
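		// Decrement the connection counters now that this connection is closed.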
		as_event_decr_connection(node->cluster, conn_queue);
		ck_pr_dec_32(&node->cluster->async_conn_pool);
	}
	as_queue_destroy(conn_queue);
}
Example #7
static inline fq_msg *
fq_pop_free_message_stack(struct free_message_stack *stack)
{
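    /* Pop a recycled message off the free stack, if one is available. */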
    fq_msg *rv = NULL;
    if (stack == NULL) {
        return rv;
    }

    ck_stack_entry_t *ce = ck_stack_pop_mpmc(&stack->stack);
    if (ce != NULL) {
        ck_pr_dec_32(&stack->size);
        rv = container_of(ce, fq_msg, cleanup_stack_entry);
    }
    return rv;
}
Example #8
void rwmsg_method_destroy(rwmsg_endpoint_t *ep,
			  rwmsg_method_t *meth) {
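  // Free the method and its owned resources only once no references remain.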
  if (!meth->refct) {
    if (meth->pathidx) {
      rwmsg_bool_t prval = rwmsg_endpoint_path_del(ep, meth->pathidx);
      RW_ASSERT(prval);
    }

    RW_ASSERT(meth->sig);
    RW_ASSERT(meth->sig->refct > 0);
    rwmsg_signature_release(ep, meth->sig);

    RW_FREE_TYPE(meth, rwmsg_method_t);
    ck_pr_dec_32(&ep->stat.objects.methods);
  }
}
Example #9
int _segment_list_release_segment_for_reading(struct segment_list *segment_list, uint32_t segment_number) {
    ck_rwlock_read_lock(segment_list->lock);

    segment_t *segment = __segment_number_to_segment(segment_list, segment_number);

    // TODO: make this an actual error
    ensure(__is_segment_number_in_segment_list_inlock(segment_list, segment_number),
           "Attempted to release a segment not in the list");

    ensure(segment->state != FREE, "Attempted to release segment in the FREE state");
    ensure(segment->state != CLOSED, "Attempted to release segment in the CLOSED state");
    ensure(segment->state != WRITING, "Attempted to release reading segment in the WRITING state");

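    // Atomically drop the reader's reference on the segment.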
    ck_pr_dec_32(&segment->refcount);

    ck_rwlock_read_unlock(segment_list->lock);
    return 0;
}
Example #10
as_connection_status
as_event_get_connection(as_event_command* cmd)
{
    as_queue* queue = &cmd->node->async_conn_qs[cmd->event_loop->index];
    as_async_connection* conn;

    // Find connection.
    while (as_queue_pop(queue, &conn)) {
        ck_pr_dec_32(&cmd->cluster->async_conn_pool);

        // Verify that socket is active and receive buffer is empty.
        int len = as_event_validate_connection(&conn->base);

        if (len == 0) {
            conn->cmd = cmd;
            cmd->conn = (as_event_connection*)conn;
            return AS_CONNECTION_FROM_POOL;
        }

        as_log_debug("Invalid async socket from pool: %d", len);
        as_event_release_connection(cmd->cluster, &conn->base, queue);
    }

    // Create a connection structure only when the node's connection count is within the queue limit.
    if (as_queue_incr_total(queue)) {
        ck_pr_inc_32(&cmd->cluster->async_conn_count);
        conn = cf_malloc(sizeof(as_async_connection));
        conn->base.pipeline = false;
        conn->cmd = cmd;
        cmd->conn = &conn->base;
        return AS_CONNECTION_NEW;
    }
    else {
        as_error err;
        as_error_update(&err, AEROSPIKE_ERR_NO_MORE_CONNECTIONS,
                        "Max node/event loop %s async connections would be exceeded: %u",
                        cmd->node->name, queue->capacity);
        as_event_stop_timer(cmd);
        as_event_error_callback(cmd, &err);
        return AS_CONNECTION_TOO_MANY;
    }
}
Example #11
static void
rwsched_tasklet_free_int(rwsched_tasklet_t *sched_tasklet)
{
  RW_CF_TYPE_VALIDATE(sched_tasklet, rwsched_tasklet_ptr_t);
  rwsched_instance_ptr_t instance = sched_tasklet->instance;
  RW_CF_TYPE_VALIDATE(instance, rwsched_instance_ptr_t);

  if (!g_atomic_int_dec_and_test(&sched_tasklet->ref_cnt)) {
    return;
  }

  int i;
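  // Run any registered per-signal destructors before clearing their slots.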
  for (i = 0 ; i < RWSCHED_MAX_SIGNALS; i++) {
    if (sched_tasklet->signal_dtor[i]) {
      sched_tasklet->signal_dtor[i](sched_tasklet->signal_dtor_ud[i]);
    }
    sched_tasklet->signal_dtor[i]= NULL;
    sched_tasklet->signal_dtor_ud[i]= NULL;
  }

  for (i = 1 ; i < instance->tasklet_array->len ; i++) {
    if (g_array_index(instance->tasklet_array, rwsched_tasklet_ptr_t, i) == sched_tasklet) {
      g_array_remove_index (instance->tasklet_array, i);
      break;
    }
  }

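  // Drain the per-tasklet CF objects: invalidate timers, release sockets and
  // run-loop sources, and free any queued dispatch records, then free the arrays.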
  rwsched_CFRunLoopTimerRef rw_timer;
  while ((rw_timer = g_array_index(sched_tasklet->cftimer_array, rwsched_CFRunLoopTimerRef, 1)) != NULL) {
    RW_CF_TYPE_VALIDATE(rw_timer, rwsched_CFRunLoopTimerRef);
    rwsched_tasklet_CFRunLoopTimerInvalidate(sched_tasklet, rw_timer);
    g_array_remove_index (sched_tasklet->cftimer_array, 1);
  }
  g_array_free(sched_tasklet->cftimer_array, TRUE);

  rwsched_CFSocketRef rw_socket;
  while ((rw_socket = g_array_index(sched_tasklet->cfsocket_array, rwsched_CFSocketRef, 1)) != NULL) {
    RW_CF_TYPE_VALIDATE(rw_socket, rwsched_CFSocketRef);
    rwsched_tasklet_CFSocketRelease(sched_tasklet, rw_socket);
    //g_array_remove_index (sched_tasklet->cfsocket_array, 1);
  }
  g_array_free(sched_tasklet->cfsocket_array, TRUE);

  rwsched_CFRunLoopSourceRef rw_source;
  while ((rw_source = g_array_index(sched_tasklet->cfsource_array, rwsched_CFRunLoopSourceRef, 1)) != NULL) {
    RW_CF_TYPE_VALIDATE(rw_source, rwsched_CFRunLoopSourceRef);
    rwsched_tasklet_CFSocketReleaseRunLoopSource(sched_tasklet, rw_source);
    g_array_remove_index (sched_tasklet->cfsource_array, 1);
  }
  g_array_free(sched_tasklet->cfsource_array, TRUE);

  rwsched_dispatch_what_ptr_t what;
  while ((what = g_array_index(sched_tasklet->dispatch_what_array, rwsched_dispatch_what_ptr_t, 1)) != NULL) {
    RW_FREE_TYPE(what, rwsched_dispatch_what_ptr_t);
    g_array_remove_index (sched_tasklet->dispatch_what_array, 1);
  }
  g_array_free(sched_tasklet->dispatch_what_array, TRUE);

  ck_pr_dec_32(&g_rwsched_tasklet_count);
  //NO-FREE
  RW_CF_TYPE_FREE(sched_tasklet, rwsched_tasklet_ptr_t);
  rwsched_instance_unref(instance);
}