Example #1
int ftw_publisher_construct(struct ftw_socket_callsite **callsite, const char *addr,
    int linger, struct ftw_socket **sock)
{
    struct ftw_socket *inst;
    int rcs;
    int rco;
    int rcb;

    /*  Preconditions expected of LabVIEW. */
    ftw_assert(*callsite && addr);
    nn_mutex_lock(&(*callsite)->sync);

    rcs = nn_socket(AF_SP, NN_PUB);

    /*  Socket creation failure? */
    if (rcs < 0) {
        *sock = NULL;
        nn_mutex_unlock(&(*callsite)->sync);
        return rcs;
    }

    rco = nn_setsockopt(rcs, NN_SOL_SOCKET, NN_LINGER, &linger, sizeof(linger));
    if (rco < 0) {
        /*  Close the socket so this error path does not leak it. */
        nn_close(rcs);
        *sock = NULL;
        nn_mutex_unlock(&(*callsite)->sync);
        return rco;
    }

    rcb = nn_bind(rcs, addr);

    /*  Endpoint creation failure? */
    if (rcb < 0) {
        nn_close(rcs);
        *sock = NULL;
        nn_mutex_unlock(&(*callsite)->sync);
        return rcb;
    }

    inst = ftw_malloc(sizeof(struct ftw_socket));
    ftw_assert(inst);

    memset(inst, 0, sizeof(*inst));

    inst->id = rcs;
    inst->callsite = *callsite;

    nn_list_item_init(&inst->item);
    nn_list_insert(&(*callsite)->active_sockets, &inst->item,
        nn_list_end(&(*callsite)->active_sockets));

    *sock = inst;

    (*callsite)->lifetime_sockets++;
    nn_mutex_unlock(&(*callsite)->sync);

    return 0;

}
Example #2
static int nn_inproc_bind (const char *addr, void *hint,
    struct nn_epbase **epbase)
{
    struct nn_list_item *it;
    struct nn_binproc *binproc;
    struct nn_cinproc *cinproc;

    nn_mutex_lock (&self.sync);

    /*  Check whether the endpoint isn't already bound. */
    /*  TODO:  This is an O(n) algorithm! */
    for (it = nn_list_begin (&self.bound); it != nn_list_end (&self.bound);
          it = nn_list_next (&self.bound, it)) {
        binproc = nn_cont (it, struct nn_binproc, item);
        if (strncmp (addr, nn_binproc_getaddr (binproc),
              NN_SOCKADDR_MAX) == 0) {
            nn_mutex_unlock (&self.sync);
            return -EADDRINUSE;
        }
    }

    /*  Insert the entry into the endpoint repository. */
    binproc = nn_binproc_create (hint);
    nn_list_insert (&self.bound, &binproc->item, nn_list_end (&self.bound));

    /*  During this process new pipes may be created. */
    for (it = nn_list_begin (&self.connected);
          it != nn_list_end (&self.connected);
          it = nn_list_next (&self.connected, it)) {
        cinproc = nn_cont (it, struct nn_cinproc, item);
        if (strncmp (addr, nn_cinproc_getaddr (cinproc),
              NN_SOCKADDR_MAX) == 0) {

            /*  Check whether the two sockets are compatible. */
            if (!nn_epbase_ispeer (&binproc->epbase, cinproc->protocol))
                continue;

            nn_assert (cinproc->connects == 0);
            cinproc->connects = 1;
            nn_binproc_connect (binproc, cinproc);
        }
    }

    nn_assert (epbase);
    *epbase = &binproc->epbase;
    nn_mutex_unlock (&self.sync);

    return 0;
}
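All of the inproc entry points in this listing (Examples #2, #12, #20, and #21) serialise on a file-scope singleton named self. The layout below is a sketch reconstructed only from the members those examples touch (self.sync, self.bound, self.connected); the layout is inferred from usage and the real definition in inproc.c may carry additional fields.

/*  Sketch of the shared state guarded by self.sync; inferred from usage,
    not copied from inproc.c. */
static struct {
    struct nn_mutex sync;      /*  Guards both endpoint lists. */
    struct nn_list bound;      /*  struct nn_binproc entries, linked via 'item'. */
    struct nn_list connected;  /*  struct nn_cinproc entries, linked via 'item'. */
} self;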
Example #3
MgErr ftw_nanomsg_reserve(struct ftw_socket_callsite **inst)
{
    static int callsite = 0;

    /*  Preconditions expected of LabVIEW. */
    ftw_assert(inst);

    /*  Creates a list of socket instances for each CLFN callsite. */
    if (*inst == NULL) {
        callsite++;
        ftw_debug("Reserving Socket Creation Callsite %d", callsite);
        *inst = ftw_malloc(sizeof(struct ftw_socket_callsite));
        ftw_assert(*inst);
        nn_mutex_init(&(*inst)->sync);
        nn_mutex_lock(&(*inst)->sync);
        nn_list_init(&(*inst)->active_sockets);
        (*inst)->id = callsite;
        (*inst)->lifetime_sockets = 0;
        nn_mutex_unlock(&(*inst)->sync);
    }
    else {
        ftw_assert_unreachable("Reserve happened twice; this is a problem with LabVIEW.");
    }

    return mgNoErr;
}
Example #4
File: ins.c Project: 4ker/nanomsg
void nn_ins_connect (struct nn_ins_item *item, nn_ins_fn fn)
{
    struct nn_list_item *it;
    struct nn_ins_item *bitem;

    nn_mutex_lock (&self.sync);

    /*  Insert the entry into the endpoint repository. */
    nn_list_insert (&self.connected, &item->item,
        nn_list_end (&self.connected));

    /*  During this process a pipe may be created. */
    for (it = nn_list_begin (&self.bound);
          it != nn_list_end (&self.bound);
          it = nn_list_next (&self.bound, it)) {
        bitem = nn_cont (it, struct nn_ins_item, item);
        if (strncmp (nn_epbase_getaddr (&item->epbase),
              nn_epbase_getaddr (&bitem->epbase), NN_SOCKADDR_MAX) == 0) {

            /*  Check whether the two sockets are compatible. */
            if (!nn_epbase_ispeer (&item->epbase, bitem->protocol))
                break;

            /*  Call back to cinproc to create actual connection. */
            fn (item, bitem);

            break;
        }
    }

    nn_mutex_unlock (&self.sync);
}
Example #5
void nn_worker_execute (struct nn_worker *self, struct nn_worker_task *task)
{
    nn_mutex_lock (&self->sync);
    nn_queue_push (&self->tasks, &task->item);
    nn_efd_signal (&self->efd);
    nn_mutex_unlock (&self->sync);
}
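Before the first nn_worker_execute call the task object needs its queue item, source id, and owning state machine filled in. The pair below is a reconstruction based on the fields read later in Example #18 (task->item, task->src, task->owner); treat the exact signatures as assumptions rather than the library's documented API.

/*  Reconstruction; field names taken from Examples #5, #15, and #18. */
void nn_worker_task_init (struct nn_worker_task *self, int src,
    struct nn_fsm *owner)
{
    self->src = src;
    self->owner = owner;
    nn_queue_item_init (&self->item);
}

void nn_worker_task_term (struct nn_worker_task *self)
{
    nn_queue_item_term (&self->item);
}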
Example #6
File: ins.c Project: Adellica/nanomsg
int nn_ins_bind (struct nn_ins_item *item, nn_ins_fn fn)
{
    struct nn_list_item *it;
    struct nn_ins_item *bitem;
    struct nn_ins_item *citem;

    nn_mutex_lock (&self.sync);

    /*  Check whether the endpoint isn't already bound. */
    /*  TODO:  This is an O(n) algorithm! */
    for (it = nn_list_begin (&self.bound); it != nn_list_end (&self.bound);
          it = nn_list_next (&self.bound, it)) {
        bitem = nn_cont (it, struct nn_ins_item, item);

        if (strncmp (nn_ep_getaddr(bitem->ep), nn_ep_getaddr(item->ep),
            NN_SOCKADDR_MAX) == 0) {

            nn_mutex_unlock (&self.sync);
            return -EADDRINUSE;
        }
    }

    /*  Insert the entry into the endpoint repository. */
    nn_list_insert (&self.bound, &item->item,
        nn_list_end (&self.bound));

    /*  During this process new pipes may be created. */
    for (it = nn_list_begin (&self.connected);
          it != nn_list_end (&self.connected);
          it = nn_list_next (&self.connected, it)) {
        citem = nn_cont (it, struct nn_ins_item, item);
        if (strncmp (nn_ep_getaddr(item->ep), nn_ep_getaddr(citem->ep),
            NN_SOCKADDR_MAX) == 0) {

            /*  Check whether the two sockets are compatible. */
            if (!nn_ep_ispeer_ep (item->ep, citem->ep))
                continue;

            fn (item, citem);
        }
    }

    nn_mutex_unlock (&self.sync);

    return 0;
}
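Examples #4 and #6 invoke the fn argument with a pair of endpoint items and discard the result. A plausible declaration, inferred from those call sites rather than quoted from ins.h, is:

/*  Assumed callback type: invoked once per matching bound/connected pair. */
typedef void (*nn_ins_fn) (struct nn_ins_item *self, struct nn_ins_item *peer);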
Example #7
File: alloc.c Project: Abioy/nanomsg
void nn_free (void *ptr)
{
    struct nn_alloc_hdr *chunk;
    
    if (!ptr)
        return;
    chunk = ((struct nn_alloc_hdr*) ptr) - 1;

    nn_mutex_lock (&nn_alloc_sync);
    nn_alloc_bytes -= chunk->size;
    --nn_alloc_blocks;
    printf ("Deallocating %s (%zu bytes)\n", chunk->name, chunk->size);
    printf ("Current memory usage: %zu bytes in %zu blocks\n",
        nn_alloc_bytes, nn_alloc_blocks);
    nn_mutex_unlock (&nn_alloc_sync);

    free (chunk);
}
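The monitored allocator in Examples #7, #10, and #13 prepends a small header to every block and steps back over it before calling free. A minimal sketch of that header, limited to the two fields these examples actually dereference, could look like this:

/*  Sketch; only 'size' and 'name' are read by the examples in this listing. */
struct nn_alloc_hdr {
    size_t size;       /*  Payload size requested by the caller. */
    const char *name;  /*  Tag passed to nn_alloc_ and echoed by the monitor. */
};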
Example #8
void nn_worker_term (struct nn_worker *self)
{
    /*  Ask worker thread to terminate. */
    nn_mutex_lock (&self->sync);
    nn_queue_push (&self->tasks, &self->stop);
    nn_efd_signal (&self->efd);
    nn_mutex_unlock (&self->sync);

    /*  Wait till worker thread terminates. */
    nn_thread_term (&self->thread);

    /*  Clean up. */
    nn_timerset_term (&self->timerset);
    nn_poller_term (&self->poller);
    nn_efd_term (&self->efd);
    nn_queue_item_term (&self->stop);
    nn_queue_term (&self->tasks);
    nn_mutex_term (&self->sync);
}
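Example #8 tears the worker down in a fixed order, which implies a matching set-up path. The function below reverses that order as a sketch; it is a reconstruction, not necessarily the exact nn_worker_init shipped with nanomsg.

/*  Sketch of the matching initialisation, reversing the teardown above. */
int nn_worker_init (struct nn_worker *self)
{
    int rc;

    rc = nn_efd_init (&self->efd);
    if (rc < 0)
        return rc;

    nn_mutex_init (&self->sync);
    nn_queue_init (&self->tasks);
    nn_queue_item_init (&self->stop);
    nn_poller_init (&self->poller);
    nn_poller_add (&self->poller, nn_efd_getfd (&self->efd), &self->efd_hndl);
    nn_timerset_init (&self->timerset);
    nn_thread_init (&self->thread, nn_worker_routine, self);

    return 0;
}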
Example #9
File: atomic.c Project: 4ker/nanomsg
uint32_t nn_atomic_dec (struct nn_atomic *self, uint32_t n)
{
#if defined NN_ATOMIC_WINAPI
    return (uint32_t) InterlockedExchangeAdd ((LONG*) &self->n, -((LONG) n));
#elif defined NN_ATOMIC_SOLARIS
    return atomic_add_32_nv (&self->n, -((int32_t) n)) + n;
#elif defined NN_ATOMIC_GCC_BUILTINS
    return (uint32_t) __sync_fetch_and_sub (&self->n, n);
#elif defined NN_ATOMIC_MUTEX
    uint32_t res;
    nn_mutex_lock (&self->sync);
    res = self->n;
    self->n -= n;
    nn_mutex_unlock (&self->sync);
    return res;
#else
#error
#endif
}
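The increment path is symmetric: under the NN_ATOMIC_MUTEX fallback it takes the same per-object lock shown above. The sketch below mirrors Example #9 with the signs flipped; read it as an illustration of the pattern rather than a verbatim copy of atomic.c.

uint32_t nn_atomic_inc (struct nn_atomic *self, uint32_t n)
{
#if defined NN_ATOMIC_WINAPI
    return (uint32_t) InterlockedExchangeAdd ((LONG*) &self->n, (LONG) n);
#elif defined NN_ATOMIC_SOLARIS
    return atomic_add_32_nv (&self->n, (int32_t) n) - n;
#elif defined NN_ATOMIC_GCC_BUILTINS
    return (uint32_t) __sync_fetch_and_add (&self->n, n);
#elif defined NN_ATOMIC_MUTEX
    uint32_t res;
    nn_mutex_lock (&self->sync);
    res = self->n;
    self->n += n;
    nn_mutex_unlock (&self->sync);
    return res;
#else
#error
#endif
}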
Example #10
File: alloc.c Project: Abioy/nanomsg
void *nn_alloc_ (size_t size, const char *name)
{
    uint8_t *chunk;

    chunk = malloc (sizeof (struct nn_alloc_hdr) + size);
    if (!chunk)
        return NULL;

    nn_mutex_lock (&nn_alloc_sync);
    ((struct nn_alloc_hdr*) chunk)->size = size;
    ((struct nn_alloc_hdr*) chunk)->name = name;
    nn_alloc_bytes += size;
    ++nn_alloc_blocks;
    printf ("Allocating %s (%zu bytes)\n", name, size);
    printf ("Current memory usage: %zu bytes in %zu blocks\n",
        nn_alloc_bytes, nn_alloc_blocks);
    nn_mutex_unlock (&nn_alloc_sync);

    return chunk + sizeof (struct nn_alloc_hdr);
}
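The nn_alloc_sync mutex and the two counters shared by Examples #7, #10, and #13 have to be initialised once before the first allocation. A minimal sketch of that set-up and the matching teardown, assuming the statics are file-scope as the examples suggest, is:

/*  Sketch; nn_alloc_sync, nn_alloc_bytes and nn_alloc_blocks are the
    file-scope statics referenced by the examples above. */
void nn_alloc_init (void)
{
    nn_mutex_init (&nn_alloc_sync);
    nn_alloc_bytes = 0;
    nn_alloc_blocks = 0;
}

void nn_alloc_term (void)
{
    nn_mutex_term (&nn_alloc_sync);
}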
Example #11
int ftw_socket_destroy(struct ftw_socket ** const sock)
{
    int rc;

    /*  Preconditions expected of LabVIEW. */
    ftw_assert(sock);

    if (*sock == NULL) {
        errno = EBADF;
        return -1;
    }

    nn_mutex_lock(&(*sock)->callsite->sync);
    rc = ftw_socket_close(*sock);
    nn_list_erase(&(*sock)->callsite->active_sockets, &(*sock)->item);
    nn_mutex_unlock(&(*sock)->callsite->sync);
    ftw_assert(ftw_free(*sock) == mgNoErr);
    *sock = NULL;

    return rc;
}
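Taken together, Examples #1, #3, and #11 cover the full callsite lifecycle: reserve the callsite once, construct sockets against it, and destroy each socket when finished. The fragment below is a hypothetical caller-side sketch that uses only functions shown in this listing; the function name, address string, and linger value are placeholders.

/*  Hypothetical usage sketch; error handling trimmed for brevity. */
static void example_publisher_lifecycle(void)
{
    struct ftw_socket_callsite *callsite = NULL;
    struct ftw_socket *pub = NULL;
    int rc;

    ftw_nanomsg_reserve(&callsite);                                   /*  Example #3 */
    rc = ftw_publisher_construct(&callsite, "tcp://127.0.0.1:5555",
        0, &pub);                                                     /*  Example #1 */
    if (rc == 0) {
        /*  ... publish on pub ... */
        ftw_socket_destroy(&pub);                                     /*  Example #11 */
    }
}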
Example #12
static int nn_inproc_connect (const char *addr, void *hint,
    struct nn_epbase **epbase)
{
    struct nn_list_item *it;
    struct nn_cinproc *cinproc;
    struct nn_binproc *binproc;

    nn_mutex_lock (&self.sync);

    /*  Insert the entry into the endpoint repository. */
    cinproc = nn_cinproc_create (hint);
    nn_list_insert (&self.connected, &cinproc->item,
        nn_list_end (&self.connected));

    /*  During this process a pipe may be created. */
    for (it = nn_list_begin (&self.bound);
          it != nn_list_end (&self.bound);
          it = nn_list_next (&self.bound, it)) {
        binproc = nn_cont (it, struct nn_binproc, item);
        if (strncmp (addr, nn_binproc_getaddr (binproc),
              NN_SOCKADDR_MAX) == 0) {

            /*  Check whether the two sockets are compatible. */
            if (!nn_epbase_ispeer (&cinproc->epbase, binproc->protocol))
                break;

            ++binproc->connects;
            nn_cinproc_connect (cinproc, binproc);
            break;
        }
    }

    nn_assert (epbase);
    *epbase = &cinproc->epbase;
    nn_mutex_unlock (&self.sync);

    return 0;
}
Example #13
File: alloc.c Project: Abioy/nanomsg
void *nn_realloc (void *ptr, size_t size)
{
    struct nn_alloc_hdr *oldchunk;
    struct nn_alloc_hdr *newchunk;
    size_t oldsize;

    oldchunk = ((struct nn_alloc_hdr*) ptr) - 1;
    oldsize = oldchunk->size;
    newchunk = realloc (oldchunk, sizeof (struct nn_alloc_hdr) + size);
    if (!newchunk)
        return NULL;
    newchunk->size = size;

    nn_mutex_lock (&nn_alloc_sync);
    nn_alloc_bytes -= oldsize;
    nn_alloc_bytes += size;
    printf ("Reallocating %s (%zu bytes to %zu bytes)\n",
        newchunk->name, oldsize, size);
    printf ("Current memory usage: %zu bytes in %zu blocks\n",
        nn_alloc_bytes, nn_alloc_blocks);
    nn_mutex_unlock (&nn_alloc_sync);

    /*  Step over the header; mirrors the '- 1' adjustment in nn_free and
        avoids struct-sized pointer arithmetic overshooting the payload. */
    return newchunk + 1;
}
Example #14
void ftw_nanomsg_shutdown_active_sockets(struct ftw_socket_callsite *callsite)
{
    struct ftw_socket *sock;
    struct nn_list_item *it;

    /*  Preconditions expected of LabVIEW. */
    ftw_assert(callsite);
    nn_mutex_lock(&callsite->sync);

    ftw_debug("Shutting down sockets from Callsite %d", callsite->id);

    it = nn_list_begin(&callsite->active_sockets);
    while (it != NULL) {
        sock = nn_cont(it, struct ftw_socket, item);
        ftw_debug("Cleaning up active socket: %04d", sock->id);
        ftw_socket_close(sock);
        it = nn_list_erase(&callsite->active_sockets, it);
    }

    nn_list_term(&callsite->active_sockets);
    nn_mutex_unlock(&callsite->sync);

    return;
}
Example #15
void nn_worker_cancel (struct nn_worker *self, struct nn_worker_task *task)
{
    nn_mutex_lock (&self->sync);
    nn_queue_remove (&self->tasks, &task->item);
    nn_mutex_unlock (&self->sync);
}
Example #16
File: ins.c Project: 4ker/nanomsg
void nn_ins_unbind (struct nn_ins_item *item)
{
    nn_mutex_lock (&self.sync);
    nn_list_erase (&self.bound, &item->item);
    nn_mutex_unlock (&self.sync);
}
Example #17
File: ins.c Project: 4ker/nanomsg
void nn_ins_disconnect (struct nn_ins_item *item)
{
    nn_mutex_lock (&self.sync);
    nn_list_erase (&self.connected, &item->item);
    nn_mutex_unlock (&self.sync);
}
Example #18
static void nn_worker_routine (void *arg)
{
    int32_t rc,pevent;
    struct nn_worker *self;
    struct nn_poller_hndl *phndl;
    struct nn_timerset_hndl *thndl;
    struct nn_queue tasks;
    struct nn_queue_item *item;
    struct nn_worker_task *task;
    struct nn_worker_fd *fd;
    struct nn_worker_timer *timer;
    PostMessage("nn_worker_routine started\n");
    self = (struct nn_worker*) arg;
    while ( 1 ) //  Infinite loop. It will be interrupted only when the object is shut down.
    {
        // Wait for new events and/or timeouts.
        rc = nn_poller_wait(&self->poller,nn_timerset_timeout (&self->timerset));
        errnum_assert(rc == 0, -rc);
        while ( 1 ) // Process all expired timers
        {
            rc = nn_timerset_event(&self->timerset, &thndl);
            if ( rc == -EAGAIN )
                break;
            //PostMessage("nn_worker process expired user\n");
            errnum_assert(rc == 0, -rc);
            timer = nn_cont(thndl, struct nn_worker_timer, hndl);
            nn_ctx_enter(timer->owner->ctx);
            nn_fsm_feed(timer->owner,-1,NN_WORKER_TIMER_TIMEOUT,timer);
            nn_ctx_leave(timer->owner->ctx);
        }
        while ( 1 ) // Process all events from the poller
        {
            rc = nn_poller_event(&self->poller,&pevent,&phndl); //  Get next poller event, such as IN or OUT
            if ( nn_slow(rc == -EAGAIN) )
                break;
            //PostMessage("nn_worker process all events from the poller\n");
            if ( phndl == &self->efd_hndl ) // If there are any new incoming worker tasks, process them
            {
                nn_assert (pevent == NN_POLLER_IN);
                //  Make a local copy of the task queue. This way the application threads are not blocked and can post new tasks while the existing tasks are being processed. Also, new tasks can be posted from within task handlers
                nn_mutex_lock(&self->sync);
                nn_efd_unsignal(&self->efd);
                memcpy(&tasks,&self->tasks,sizeof(tasks));
                nn_queue_init(&self->tasks);
                nn_mutex_unlock(&self->sync);
                while ( 1 )
                {
                    item = nn_queue_pop(&tasks); //  Next worker task
                    if ( nn_slow(!item) )
                        break;
                    //PostMessage("nn_worker next worker task\n");
                    if ( nn_slow(item == &self->stop) ) //  If the worker thread is asked to stop, do so
                    {
                        nn_queue_term(&tasks);
                        return;
                    }
                    // It's a user-defined task. Notify the user that it has arrived in the worker thread
                    //PostMessage("nn_worker user defined task\n");
                    task = nn_cont(item,struct nn_worker_task,item);
                    nn_ctx_enter(task->owner->ctx);
                    nn_fsm_feed(task->owner,task->src,NN_WORKER_TASK_EXECUTE,task);
                    nn_ctx_leave (task->owner->ctx);
                }
                nn_queue_term (&tasks);
                continue;
            }
            PostMessage("nn_worker true i/o, invoke handler\n");
            fd = nn_cont(phndl,struct nn_worker_fd,hndl); // It's a true I/O event. Invoke the handler
            PostMessage("nn_worker true i/o, fd.%p\n",fd);
            nn_ctx_enter(fd->owner->ctx);
            PostMessage("nn_worker true i/o, after nn_ctx_enter\n");
            nn_fsm_feed(fd->owner,fd->src,pevent,fd);
            PostMessage("nn_worker true i/o, after nn_fsm_feed leave.%p\n",fd->owner->ctx);
            nn_ctx_leave(fd->owner->ctx);
            PostMessage("nn_worker true i/o, after nn_ctx_leave\n");
        }
    }
}
Example #19
int ftw_subscriber_construct(struct ftw_socket_callsite **callsite, LVUserEventRef *lv_event,
    const char *addr, int linger, int max_recv_size, struct ftw_socket **sock)
{
    struct ftw_socket *inst;
    int rcc;
    int rcs;
    int rco;

    /*  Preconditions expected of LabVIEW. */
    ftw_assert(*callsite && addr);
    nn_mutex_lock(&(*callsite)->sync);

    rcs = nn_socket(AF_SP, NN_SUB);

    /*  Socket creation failure? */
    if (rcs < 0) {
        *sock = NULL;
        nn_mutex_unlock(&(*callsite)->sync);
        return rcs;
    }

    rco = nn_setsockopt(rcs, NN_SOL_SOCKET, NN_LINGER, &linger, sizeof(linger));
    if (rco < 0) {
        /*  Close the socket so this error path does not leak it. */
        nn_close(rcs);
        *sock = NULL;
        nn_mutex_unlock(&(*callsite)->sync);
        return rco;
    }

    rco = nn_setsockopt(rcs, NN_SOL_SOCKET, NN_RCVMAXSIZE, &max_recv_size, sizeof(max_recv_size));
    if (rco < 0) {
        /*  Close the socket so this error path does not leak it. */
        nn_close(rcs);
        *sock = NULL;
        nn_mutex_unlock(&(*callsite)->sync);
        return rco;
    }

    rcc = nn_connect(rcs, addr);

    /*  Endpoint creation failure? */
    if (rcc < 0) {
        nn_close(rcs);
        *sock = NULL;
        nn_mutex_unlock(&(*callsite)->sync);
        return rcc;
    }

    rco = nn_setsockopt (rcs, NN_SUB, NN_SUB_SUBSCRIBE, "", 0);
    if (rco < 0) {
        nn_close(rcs);
        *sock = NULL;
        nn_mutex_unlock(&(*callsite)->sync);
        return rco;
    }

    inst = ftw_malloc(sizeof(struct ftw_socket));
    ftw_assert(inst);

    inst->incoming_msg_notifier_event = *lv_event;
    inst->id = rcs;
    inst->callsite = *callsite;

    nn_list_item_init(&inst->item);
    nn_list_insert(&(*callsite)->active_sockets, &inst->item,
        nn_list_end(&(*callsite)->active_sockets));

    nn_sem_init(&inst->msg_acknowledged);

    /*  Launch thread and wait for it to initialize. */
    nn_sem_init(&inst->async_recv_ready);
    nn_thread_init(&inst->async_recv_thread, ftw_subscriber_async_recv_thread, inst);
    nn_sem_wait(&inst->async_recv_ready);

    *sock = inst;

    (*callsite)->lifetime_sockets++;
    nn_mutex_unlock(&(*callsite)->sync);

    return 0;
}
Example #20
void nn_inproc_disconnect (struct nn_cinproc *cinproc)
{
    nn_mutex_lock (&self.sync);
    nn_list_erase (&self.connected, &cinproc->item);
    nn_mutex_unlock (&self.sync);
}
Example #21
void nn_inproc_unbind (struct nn_binproc *binproc)
{
    nn_mutex_lock (&self.sync);
    nn_list_erase (&self.bound, &binproc->item);
    nn_mutex_unlock (&self.sync);
}