Example #1
rwmsg_destination_t *rwmsg_destination_create(rwmsg_endpoint_t *ep,
        rwmsg_addrtype_t atype,
        const char *addrpath) {
    RW_ASSERT_TYPE(ep, rwmsg_endpoint_t);
    rwmsg_destination_t *dt = NULL;

    int plen = strlen(addrpath);
    if (plen >= RWMSG_PATH_MAX) {
        goto ret;
    }

    dt = (rwmsg_destination_t*)RW_MALLOC0_TYPE(sizeof(*dt), rwmsg_destination_t);
    dt->ep = ep;
    dt->refct = 1;

    RWMSG_EP_LOCK(ep);
    RW_DL_INSERT_HEAD(&ep->track.destinations, dt, trackelem);
    RWMSG_EP_UNLOCK(ep);
    ck_pr_inc_32(&ep->stat.objects.destinations);

    RW_ASSERT(atype == RWMSG_ADDRTYPE_UNICAST);
    dt->destination_ct=1;
    dt->atype = atype;
    strncpy(dt->apath, addrpath, sizeof(dt->apath));
    dt->apath[sizeof(dt->apath)-1]='\0';
    dt->apathhash = rw_hashlittle64(dt->apath, plen);

    dt->defstream.tx_win = 0;	/* never used without feedme cb */
    dt->defstream.defstream = TRUE;
    dt->defstream.refct = 1;
    dt->defstream.dest = dt;	/* no ref, this stream not destroyed in rwmsg_clichan.c */

ret:
    return dt;
}
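
The pattern above recurs throughout these examples: the object's own refct starts at 1 for the caller, while ck_pr_inc_32 atomically bumps a shared statistics counter with no lock held. As a point of reference, here is a minimal, self-contained sketch of that counter idiom (the counter name and program are illustrative, not from the rwmsg source):

#include <ck_pr.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t object_count; /* shared 32-bit gauge, as ck_pr_inc_32 requires */

int main(void) {
  ck_pr_inc_32(&object_count);                           /* atomic increment; returns void */
  printf("objects: %u\n", ck_pr_load_32(&object_count)); /* atomic read */
  return 0;
}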
Example #2
static void 
fqd_queue_message_process(remote_data_client *me, fq_msg *msg)
{
  ck_fifo_spsc_t *work_queue = NULL;
  ck_fifo_spsc_entry_t *entry = NULL;
  int i = 0, tindex = 0;
  uint32_t blmin = UINT32_MAX;
  struct incoming_message *m = malloc(sizeof(struct incoming_message));
  
  m->client = me;
  m->msg = msg;

  /* while we live in this queue, we ref the client so it can't be destroyed until the queue is cleared of it */
  fqd_remote_client_ref((remote_client *) m->client);

  /* find the least loaded queue */
  for ( ; i < worker_thread_count; i++) {
    uint32_t x = work_queue_backlogs[i];
    if (x < blmin) {
      blmin = x;
      tindex = i;
    }
  }
    
  ck_pr_inc_32(&work_queue_backlogs[tindex]);
  work_queue = &work_queues[tindex];
  entry = ck_fifo_spsc_recycle(work_queue);
  if (entry == NULL) {
    entry = malloc(sizeof(ck_fifo_spsc_entry_t));
  }
  ck_fifo_spsc_enqueue(work_queue, entry, m);
}
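
The producer above only enqueues; for completeness, a plausible consumer side is sketched below. It is an assumption about the worker loop (worker_loop, the message handling, and the deref call are illustrative names), showing the ck_fifo_spsc_dequeue that pairs with the enqueue and the ck_pr_dec_32 that balances the backlog increment:

/* Sketch of a per-thread consumer, assuming the same work_queues and
 * work_queue_backlogs globals as above. */
static void *worker_loop(void *arg) {
  int tindex = (int)(intptr_t)arg;
  ck_fifo_spsc_t *q = &work_queues[tindex];
  struct incoming_message *m;

  for (;;) {
    if (!ck_fifo_spsc_dequeue(q, &m)) {
      continue; /* a real loop would block or back off here */
    }
    ck_pr_dec_32(&work_queue_backlogs[tindex]); /* balances the producer's inc */
    /* ... handle m->msg, then drop the reference taken at enqueue time ... */
    fqd_remote_client_deref((remote_client *)m->client); /* assumed counterpart of _ref */
    free(m);
  }
  return NULL;
}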
Example #3
segment_t* _segment_list_get_segment_for_writing(struct segment_list *segment_list, uint32_t segment_number) {
    // We will never modify the segment list in this function, so we can take a read lock
    ck_rwlock_read_lock(segment_list->lock);

    segment_t *segment = __segment_number_to_segment(segment_list, segment_number);

    // Make sure we are not trying to get a segment before it has been allocated.  Getting a segment
    // anytime after it was allocated can easily happen because of a slow thread, but getting it
    // before it has been allocated should not happen.
    ensure(segment_number < segment_list->head, "Attempted to get a segment before it was allocated");

    // This segment is outside the list
    // TODO: More specific error handling
    if (!__is_segment_number_in_segment_list_inlock(segment_list, segment_number)) {
        ck_rwlock_read_unlock(segment_list->lock);
        return NULL;
    }

    // If this segment is not in the WRITING state, we may have just been too slow, so return NULL
    // rather than asserting to give the caller an opportunity to recover
    // TODO: More specific error handling
    if (segment->state != WRITING) {
        ck_rwlock_read_unlock(segment_list->lock);
        return NULL;
    }

    // Increment the refcount of the newly initialized segment since we are returning it
    ck_pr_inc_32(&segment->refcount);
    ck_rwlock_read_unlock(segment_list->lock);
    return segment;
}
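
The function returns the segment with its refcount raised, so a release path has to drop it again. The sketch below is hypothetical (the function name and cleanup policy are not from the source) and uses ck_pr_faa_32, whose returned previous value makes "did I drop the last reference?" detectable, which plain ck_pr_dec_32 cannot report:

/* Hypothetical release counterpart for the get_* functions above. */
static void _segment_list_release_segment(segment_t *segment) {
    /* ck_pr_faa_32 returns the value before the add; adding (uint32_t)-1
     * decrements, so a previous value of 1 means this was the last reference. */
    if (ck_pr_faa_32(&segment->refcount, (uint32_t)-1) == 1) {
        /* last reference dropped; actual teardown is segment-list specific */
    }
}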
Example #4
as_status
as_event_command_execute(as_event_command* cmd, as_error* err)
{
    ck_pr_inc_32(&cmd->cluster->async_pending);

    // Only do this after the above increment to avoid a race with as_cluster_destroy().
    if (!cmd->cluster->valid) {
        as_event_command_free(cmd);
        return as_error_set_message(err, AEROSPIKE_ERR_CLIENT, "Client shutting down");
    }

    // Use pointer comparison for performance.
    // If portability becomes an issue, use "pthread_equal(event_loop->thread, pthread_self())"
    // instead.
    if (cmd->event_loop->thread == pthread_self()) {
        // We are already in event loop thread, so start processing.
        as_event_command_begin(cmd);
    }
    else {
        if (cmd->timeout_ms) {
            // Store current time in first 8 bytes which is not used yet.
            *(uint64_t*)cmd = cf_getms();
        }

        // Send command through queue so it can be executed in event loop thread.
        if (! as_event_send(cmd)) {
            as_event_command_free(cmd);
            return as_error_set_message(err, AEROSPIKE_ERR_CLIENT, "Failed to queue command");
        }
    }
    return AEROSPIKE_OK;
}
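
The comment about racing with as_cluster_destroy() implies a destroy side that first clears valid and then drains async_pending. That code is not shown here; the sketch below is an assumption about its shape, not the client's actual shutdown path:

/* Sketch of the shutdown ordering implied above (names reused from the
 * example; the loop body is illustrative). */
static void cluster_shutdown_sketch(as_cluster* cluster) {
    cluster->valid = false;
    ck_pr_fence_store();       /* make the flag visible before we poll */
    while (ck_pr_load_32(&cluster->async_pending) != 0) {
        /* spin or sleep until every in-flight command has finished */
    }
}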
Example #5
static inline void 
fq_push_free_message_stack(struct free_message_stack *stack, fq_msg *m) 
{
  if (stack == NULL) {
    return;
  }

  while (ck_pr_load_32(&stack->size) > stack->max_size) {
    ck_stack_entry_t *ce = ck_stack_pop_mpmc(&stack->stack);
    if (ce == NULL) {
      break;
    }
    fq_msg *overflow = container_of(ce, fq_msg, cleanup_stack_entry);
    free(overflow);
    ck_pr_dec_32(&stack->size);
  }
  uint32_t c = ck_pr_load_32(&stack->size);
  if (c >= stack->max_size) {
    free(m);
    return;
  }

  ck_pr_inc_32(&stack->size);
  ck_stack_push_mpmc(&stack->stack, &m->cleanup_stack_entry);
}
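
Pushing onto the free stack only helps if something pops from it; a pop counterpart is sketched below. The function name is hypothetical, but the ck_stack_pop_mpmc / container_of / ck_pr_dec_32 sequence mirrors the trimming loop above:

/* Hypothetical pop side of the free-message stack. */
static inline fq_msg *
fq_pop_free_message_stack(struct free_message_stack *stack)
{
  if (stack == NULL) {
    return NULL;
  }
  ck_stack_entry_t *ce = ck_stack_pop_mpmc(&stack->stack);
  if (ce == NULL) {
    return NULL; /* stack empty; caller falls back to malloc */
  }
  ck_pr_dec_32(&stack->size);
  return container_of(ce, fq_msg, cleanup_stack_entry);
}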
Example #6
rwsched_dispatch_queue_t
rwsched_dispatch_queue_create(rwsched_tasklet_ptr_t sched_tasklet,
                              const char *label,
                              dispatch_queue_attr_t attr)
{
  struct rwsched_dispatch_queue_s *queue;

  // Validate input parameters
  RW_CF_TYPE_VALIDATE(sched_tasklet, rwsched_tasklet_ptr_t);
  rwsched_instance_ptr_t instance = sched_tasklet->instance;
  RW_CF_TYPE_VALIDATE(instance, rwsched_instance_ptr_t);

  // Allocate memory for the dispatch queue
  queue = (rwsched_dispatch_queue_t) RW_MALLOC0_TYPE(sizeof(*queue), rwsched_dispatch_queue_t);
  RW_ASSERT_TYPE(queue, rwsched_dispatch_queue_t);

  // If libdispatch is enabled for the entire instance, then call the libdispatch routine
  if (instance->use_libdispatch_only) {
    queue->header.libdispatch_object._dq = dispatch_queue_create(label, attr);
    RW_ASSERT(queue->header.libdispatch_object._dq);

    rwsched_tasklet_ref(sched_tasklet);
    ck_pr_inc_32(&sched_tasklet->counters.queues);

    return queue;
  }

  // Not yet implemented
  RW_CRASH();
  return NULL;
}
Example #7
rwsched_tasklet_t *
rwsched_tasklet_new(rwsched_instance_t *instance)
{
  rwsched_tasklet_ptr_t sched_tasklet;

  // Validate input parameters
  RW_CF_TYPE_VALIDATE(instance, rwsched_instance_ptr_t);

  // Allocate memory for the new sched_tasklet structure and track it
  sched_tasklet = RW_CF_TYPE_MALLOC0(sizeof(*sched_tasklet), rwsched_tasklet_ptr_t);
  RW_CF_TYPE_VALIDATE(sched_tasklet, rwsched_tasklet_ptr_t);

  rwsched_instance_ref(instance);
  sched_tasklet->instance = instance;
  sched_tasklet->rwresource_track_handle = RW_RESOURCE_TRACK_CREATE_CONTEXT("Tasklet Context");

  // Look for an unused entry in tasklet_array (start the indexes at 1 for now)
  int i; for (i = 1 ; i < instance->tasklet_array->len ; i++) {
    if (g_array_index(instance->tasklet_array, rwsched_tasklet_ptr_t, i) == NULL) {
      g_array_index(instance->tasklet_array, rwsched_tasklet_ptr_t, i) = sched_tasklet;
      break;
    }
  }
  if (i >= instance->tasklet_array->len) {
    // Insert a new element at the end of the array
    g_array_append_val(instance->tasklet_array, sched_tasklet);
  }


#ifdef _CF_
  // Allocate an array of cftimer pointers and track it
  rwsched_CFRunLoopTimerRef cftimer = NULL;
  sched_tasklet->cftimer_array = g_array_sized_new(TRUE, TRUE, sizeof(void *), 256);
  g_array_append_val(sched_tasklet->cftimer_array, cftimer);

  // Allocate an array of cfsocket pointers and track it
  rwsched_CFSocketRef cfsocket = NULL;
  sched_tasklet->cfsocket_array = g_array_sized_new(TRUE, TRUE, sizeof(void *), 256);
  g_array_append_val(sched_tasklet->cfsocket_array, cfsocket);

  // Allocate an array of cfsource pointers and track it
  rwsched_CFRunLoopSourceRef cfsource = NULL;
  sched_tasklet->cfsource_array = g_array_sized_new(TRUE, TRUE, sizeof(void *), 256);
  g_array_append_val(sched_tasklet->cfsource_array, cfsource);

  // Allocate an array of dispatch_what pointers and track it
  rwsched_dispatch_what_ptr_t dispatch_what = NULL;
  sched_tasklet->dispatch_what_array = g_array_sized_new(TRUE, TRUE, sizeof(void *), 256);
  g_array_append_val(sched_tasklet->dispatch_what_array, dispatch_what);
#endif

  rwsched_tasklet_ref(sched_tasklet);
  ck_pr_inc_32(&g_rwsched_tasklet_count);

  // Return the allocated sched_tasklet structure
  return sched_tasklet;
}
Example #8
void
as_cluster_set_async_max_conns_per_node(as_cluster* cluster, uint32_t async_size, uint32_t pipe_size)
{
	// Note: This setting only affects pools in new nodes.  Existing node pools are not changed.
	cluster->async_max_conns_per_node = async_size;
	cluster->pipe_max_conns_per_node = pipe_size;
	ck_pr_fence_store();
	ck_pr_inc_32(&cluster->version);
}
Example #9
static inline as_seeds*
swap_seeds(as_cluster* cluster, as_seeds* seeds)
{
	ck_pr_fence_store();
	as_seeds* old = ck_pr_fas_ptr(&cluster->seeds, seeds);
	ck_pr_fence_store();
	ck_pr_inc_32(&cluster->version);
	return old;
}
Example #10
static inline as_addr_maps*
swap_ip_map(as_cluster* cluster, as_addr_maps* ip_map)
{
	ck_pr_fence_store();
	as_addr_maps* old = ck_pr_fas_ptr(&cluster->ip_map, ip_map);
	ck_pr_fence_store();
	ck_pr_inc_32(&cluster->version);
	return old;
}
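
Examples #8, #9 and #10 all end with the same fence-then-increment of cluster->version, which suggests readers treat the version as a change detector. How the Aerospike client actually consumes it is not shown here; the sketch below is one seqlock-style possibility, with ck_pr_fence_load ordering the loads on the reader side:

/* A seqlock-style reader sketch (assumed usage, not the client's code). */
static inline as_seeds*
read_seeds_consistent(as_cluster* cluster)
{
	as_seeds* seeds;
	uint32_t v;

	do {
		v = ck_pr_load_32(&cluster->version);
		ck_pr_fence_load();                     /* version load before data load */
		seeds = ck_pr_load_ptr(&cluster->seeds);
		ck_pr_fence_load();                     /* data load before the re-check */
	} while (v != ck_pr_load_32(&cluster->version));

	return seeds;
}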
Example #11
static inline void
as_event_put_connection(as_event_command* cmd)
{
    as_queue* queue = &cmd->node->async_conn_qs[cmd->event_loop->index];

    if (as_queue_push(queue, &cmd->conn)) {
        ck_pr_inc_32(&cmd->cluster->async_conn_pool);
    } else {
        as_event_release_connection(cmd->cluster, cmd->conn, queue);
    }
}
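
async_conn_pool is incremented here on a successful push and decremented in Example #14's pop loop below, so it acts as a gauge of pooled connections. Since ck_pr_inc_32 and ck_pr_dec_32 keep it consistent without a lock, a stats thread could sample it as sketched here (function name illustrative, field name as in the examples):

/* Illustrative gauge read of the pooled-connection counter. */
static inline uint32_t
async_conn_pool_gauge(as_cluster* cluster)
{
    return ck_pr_load_32(&cluster->async_conn_pool);
}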
Example #12
rwmsg_method_t *rwmsg_method_create(rwmsg_endpoint_t *ep,
				    const char *path,
				    rwmsg_signature_t *sig,
				    rwmsg_closure_t *requestcb) {
  RW_ASSERT(path);
  RW_ASSERT(sig);
  
  rwmsg_method_t *meth = NULL;

  uint64_t pathhash = 0;
  int32_t pathidx = 0;

  if (requestcb) {
    pathidx = rwmsg_endpoint_path_add(ep, path, &pathhash);
  } else {
    pathhash = rwmsg_endpoint_path_hash(ep, path);
  }
  RW_ASSERT(pathhash);

  /* null cb occurs in the broker, which ignores other tasklets' entries for the small model broker */
  if (pathidx || !requestcb) {
    meth = (rwmsg_method_t*)RW_MALLOC0_TYPE(sizeof(*meth), rwmsg_method_t);
    meth->pathidx = pathidx;
    meth->pathhash = pathhash;
    strncpy(meth->path, path, sizeof(meth->path));
    meth->path[sizeof(meth->path)-1] = '\0';
    ck_pr_inc_32(&sig->refct);
    meth->sig = sig;
    if (requestcb) {
      meth->cb = *requestcb;
    }
    meth->accept_fd = -1;
    
    ck_pr_inc_32(&ep->stat.objects.methods);
    meth->refct=1;
  }

  return meth;
}
Example #13
segment_t* _segment_list_get_segment_for_reading(struct segment_list *segment_list, uint32_t segment_number) {

    // We have to take a write lock because we might be allocating a segment (reopening it from an
    // existing file).
    ck_rwlock_write_lock(segment_list->lock);

    segment_t *segment = __segment_number_to_segment(segment_list, segment_number);

    // This segment is outside the list
    // TODO: More specific error handling
    if (!__is_segment_number_in_segment_list_inlock(segment_list, segment_number)) {
        segment = NULL;
        goto end;
    }

    // If this segment is free, we may have just been too slow, so return NULL rather than asserting
    // to give the caller an opportunity to recover
    // TODO: More specific error handling
    if (segment->state == FREE) {
        segment = NULL;
        goto end;
    }

    // We should only be attempting to read from a segment in the READING or CLOSED states
    // If a user is attempting to get a segment for reading that is in the WRITING state, that is a
    // programming error, since it cannot happen as a race condition
    ensure(segment->state == READING || segment->state == CLOSED,
           "Attempted to get segment for reading not in the READING or CLOSED states");

    // If this segment is closed, reopen it
    if (segment->state == CLOSED) {

        // Allocate the segment and reopen the existing store file
        ensure(_allocate_segment_inlock(segment_list, segment_number, true/*reopen_store*/) == 0,
               "Failed to allocate segment, from existing file");

        // Reopened segments are in the READING state
        segment->state = READING;
    }

    // Increment the refcount of the newly initialized segment since we are returning it
    ck_pr_inc_32(&segment->refcount);

end:
    ck_rwlock_write_unlock(segment_list->lock);
    return segment;
}
Example #14
as_connection_status
as_event_get_connection(as_event_command* cmd)
{
    as_queue* queue = &cmd->node->async_conn_qs[cmd->event_loop->index];
    as_async_connection* conn;

    // Find connection.
    while (as_queue_pop(queue, &conn)) {
        ck_pr_dec_32(&cmd->cluster->async_conn_pool);

        // Verify that socket is active and receive buffer is empty.
        int len = as_event_validate_connection(&conn->base);

        if (len == 0) {
            conn->cmd = cmd;
            cmd->conn = (as_event_connection*)conn;
            return AS_CONNECTION_FROM_POOL;
        }

        as_log_debug("Invalid async socket from pool: %d", len);
        as_event_release_connection(cmd->cluster, &conn->base, queue);
    }

    // Create connection structure only when node connection count within queue limit.
    if (as_queue_incr_total(queue)) {
        ck_pr_inc_32(&cmd->cluster->async_conn_count);
        conn = cf_malloc(sizeof(as_async_connection));
        conn->base.pipeline = false;
        conn->cmd = cmd;
        cmd->conn = &conn->base;
        return AS_CONNECTION_NEW;
    }
    else {
        as_error err;
        as_error_update(&err, AEROSPIKE_ERR_NO_MORE_CONNECTIONS,
                        "Max node/event loop %s async connections would be exceeded: %u",
                        cmd->node->name, queue->capacity);
        as_event_stop_timer(cmd);
        as_event_error_callback(cmd, &err);
        return AS_CONNECTION_TOO_MANY;
    }
}
Example #15
rwsched_instance_t *
rwsched_instance_new(void)
{
  struct rwsched_instance_s *instance;

  // Register the rwsched instance types
  RW_CF_TYPE_REGISTER(rwsched_instance_ptr_t);
  RW_CF_TYPE_REGISTER(rwsched_tasklet_ptr_t);
  rwsched_CFRunLoopInit();
  rwsched_CFSocketInit();

  // Allocate the Master Resource-Tracking Handle
  g_rwresource_track_handle = RW_RESOURCE_TRACK_CREATE_CONTEXT("The Master Context");

  // Allocate a rwsched instance type and track it
  instance = RW_CF_TYPE_MALLOC0(sizeof(*instance), rwsched_instance_ptr_t);
  RW_CF_TYPE_VALIDATE(instance, rwsched_instance_ptr_t);

  // Set the instance configuration
  instance->config.single_thread = TRUE;

  // For now use libdispatch only
  instance->use_libdispatch_only = TRUE;
  // libdispatch_init();

  // Fake up a rwqueue placeholder object to use as the (NULL) DISPATCH_TARGET_QUEUE_DEFAULT
  RW_ASSERT(instance->use_libdispatch_only);
  instance->default_rwqueue = (rwsched_dispatch_queue_t) RW_MALLOC0_TYPE(sizeof(*instance->default_rwqueue), rwsched_dispatch_queue_t);
  RW_ASSERT_TYPE(instance->default_rwqueue, rwsched_dispatch_queue_t);
  instance->default_rwqueue->header.libdispatch_object._dq = DISPATCH_TARGET_QUEUE_DEFAULT;

  // Fake up a rwqueue placeholder object to use as DISPATCH_TARGET_QUEUE_MAIN
  RW_ASSERT(instance->use_libdispatch_only);
  instance->main_rwqueue = (rwsched_dispatch_queue_t) RW_MALLOC0_TYPE(sizeof(*instance->main_rwqueue), rwsched_dispatch_queue_t);
  RW_ASSERT_TYPE(instance->main_rwqueue, rwsched_dispatch_queue_t);
  instance->main_rwqueue->header.libdispatch_object._dq = dispatch_get_main_queue();

  // Fake up rwqueue placeholder objects for the usual four global
  // queues.  The pri values are not 0,1,2,3 or similar, they are
  // -MAX, -2, 0, 2.  We do not support arbitrary pri values, although
  // I think the dispatch API is intended to.
  RW_ASSERT(instance->use_libdispatch_only);
  RW_STATIC_ASSERT(RWSCHED_DISPATCH_QUEUE_GLOBAL_CT == 4);
  static long pris[RWSCHED_DISPATCH_QUEUE_GLOBAL_CT] = {
    DISPATCH_QUEUE_PRIORITY_HIGH,
    DISPATCH_QUEUE_PRIORITY_DEFAULT,
    DISPATCH_QUEUE_PRIORITY_LOW,
    DISPATCH_QUEUE_PRIORITY_BACKGROUND
  };
  int  i; for (i=0; i<RWSCHED_DISPATCH_QUEUE_GLOBAL_CT; i++) {
    instance->global_rwqueue[i].pri = pris[i];
    instance->global_rwqueue[i].rwq = (rwsched_dispatch_queue_t) RW_MALLOC0_TYPE(sizeof(*instance->global_rwqueue[i].rwq), rwsched_dispatch_queue_t);
    RW_ASSERT_TYPE(instance->global_rwqueue[i].rwq, rwsched_dispatch_queue_t);
    instance->global_rwqueue[i].rwq->header.libdispatch_object._dq = dispatch_get_global_queue(pris[i], 0);
    RW_ASSERT(instance->global_rwqueue[i].rwq->header.libdispatch_object._dq);
  }

  instance->main_cfrunloop_mode = kCFRunLoopDefaultMode;
  //instance->main_cfrunloop_mode = CFSTR("TimerMode");
  //instance->deferred_cfrunloop_mode = CFSTR("Deferred Mode");

  // Allocate an array of tasklet pointers and track it
  rwsched_tasklet_t *tasklet = NULL;
  instance->tasklet_array = g_array_sized_new(TRUE, TRUE, sizeof(void *), 256);
  g_array_append_val(instance->tasklet_array, tasklet);

  rwsched_instance_ref(instance);
  ck_pr_inc_32(&g_rwsched_instance_count);
  //RW_ASSERT(g_rwsched_instance_count <= 2);
  g_rwsched_instance = instance;

  instance->rwlog_instance = rwlog_init("RW.Sched");

  // Return the instance pointer
  return instance;
}