Example no. 1
0
void sock_fd_remove (int fd)
{
	unsigned	i, n;

	lock_take (sock_lock);
	n = atomic_get_w (num_fds);
	for (i = 0; i < n; i++)
		if ((*fds) [i].fd == fd) {
			lock_take (poll_lock);
			n = atomic_get_w (num_fds);
			if (i + 1 < n) {
				memmove (&(*fds) [i],
						&(*fds) [i + 1],
						(n - i - 1) * sizeof (struct pollfd));
				memmove (&(*fcts) [i],
						&(*fcts) [i + 1],
						(n - i - 1) * sizeof (RSDATAFCT));
				memmove (&(*ud) [i],
						&(*ud) [i + 1],
						(n - i - 1) * sizeof (void *));
				memmove (&(*names) [i],
						&(*names) [i + 1],
						(n - i - 1) * sizeof (char *));
			}
			atomic_dec_w (num_fds);
			lock_release (poll_lock);
			break;
		}
	lock_release (sock_lock);
}
Example no. 2
0
void sock_fd_schedule (void)
{
	struct pollfd	*iop;
	unsigned	i, n;
	RSDATAFCT	fct;
	int		fd;
	void		*user;
	short		events;

	lock_take (sock_lock);
	n = atomic_get_w (num_fds);
	for (i = 0, iop = *fds; i < n; i++, iop++)
		if (iop->revents) {
			fct = (*fcts) [i];
			fd = iop->fd;
			events = iop->revents;
			user = (*ud) [i];
			iop->revents = 0;
			lock_release (sock_lock);
			(*fct) (fd, events, user);
			lock_take (sock_lock);
			n = atomic_get_w (num_fds);
		}
	lock_release (sock_lock);
}
Example no. 3
0
DDS_ReturnCode_t DDS_DataWriter_get_matched_subscriptions(
					DDS_DataWriter        wp,
					DDS_InstanceHandleSeq *handles)
{
	Topic_t		*tp;
	Endpoint_t	*ep;
	FilteredTopic_t	*ftp;
	DDS_ReturnCode_t ret;

	ctrc_begind (DCPS_ID, DCPS_DW_G_MATCH_S, &wp, sizeof (wp));
	ctrc_contd (&handles, sizeof (handles));
	ctrc_endd ();

	if (!handles)
		return (DDS_RETCODE_BAD_PARAMETER);

	DDS_SEQ_INIT (*handles);
	if (!writer_ptr (wp, 0, &ret))
		return (ret);

	tp = wp->w_topic;
	if (lock_take (tp->lock))
		return (DDS_RETCODE_ALREADY_DELETED);

#ifndef RW_TOPIC_LOCK
	if (lock_take (wp->w_lock)) {
		lock_release (tp->lock);
		return (DDS_RETCODE_ALREADY_DELETED);
	}
#endif
	for (ep = tp->writers; ep && ep != &wp->w_ep; ep = ep->next)
		;
	if (!ep) {
		ret = DDS_RETCODE_ALREADY_DELETED;
		goto done;
	}
	for (ep = tp->readers; ep; ep = ep->next)
		if (check_matched_subscription (wp, ep, handles, NULL)) {
			ret = DDS_RETCODE_OUT_OF_RESOURCES;
			goto done;
		}

	for (ftp = tp->filters; ftp; ftp = ftp->next)
		for (ep = ftp->topic.readers; ep; ep = ep->next)
			if (check_matched_subscription (wp, ep, handles, NULL)) {
				ret = DDS_RETCODE_OUT_OF_RESOURCES;
				goto done;
			}

    done:
#ifndef RW_TOPIC_LOCK
	lock_release (wp->w_lock);
#endif
	lock_release (tp->lock);
	return (ret);
}
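A minimal caller sketch for the function above. The wrapper name and the `dw` argument are hypothetical; only identifiers visible in the example (DDS_DataWriter, DDS_InstanceHandleSeq, DDS_SEQ_INIT, DDS_RETCODE_OK) are relied on, and the relevant DDS headers are assumed to be in scope.

static DDS_ReturnCode_t dump_matched_subscriptions (DDS_DataWriter dw)
{
	DDS_InstanceHandleSeq	handles;
	DDS_ReturnCode_t	ret;

	/* Redundant here (the callee applies DDS_SEQ_INIT (*handles) itself),
	   but harmless. */
	DDS_SEQ_INIT (handles);
	ret = DDS_DataWriter_get_matched_subscriptions (dw, &handles);
	if (ret != DDS_RETCODE_OK)
		return (ret);

	/* On success the sequence holds one instance handle per matched
	   subscription; iterating and releasing it uses the sequence macros
	   of this DDS implementation, which are not shown here. */
	return (DDS_RETCODE_OK);
}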
Example no. 4
0
DDS_ReturnCode_t DDS_Topic_set_qos (DDS_Topic tp, DDS_TopicQos *qos)
{
	Endpoint_t		*ep;
	Reader_t		*rp;
	Writer_t		*wp;
	DDS_ReturnCode_t	ret;

	ctrc_begind (DCPS_ID, DCPS_T_S_QOS, &tp, sizeof (tp));
	ctrc_contd (&qos, sizeof (qos));
	ctrc_endd ();

	if (!topic_ptr (tp, 1, &ret))
		return (ret);

	if (qos == DDS_TOPIC_QOS_DEFAULT)
		qos = &tp->domain->def_topic_qos;
	else if (!qos_valid_topic_qos (qos)) {
		ret = DDS_RETCODE_BAD_PARAMETER;
		goto done;
	}
	ret = qos_topic_update (&tp->qos, qos);
	if (ret != DDS_RETCODE_OK)
		goto done;

	/* Update all local Readers associated with topic. */
	for (ep = tp->readers; ep; ep = ep->next)
		if ((ep->entity.flags & EF_LOCAL) != 0) {
			rp = (Reader_t *) ep;
#ifdef RW_LOCKS
			lock_take (rp->r_lock);
#endif
			disc_reader_update (tp->domain, rp, 1, 0);
#ifdef RW_LOCKS
			lock_release (rp->r_lock);
#endif
		}

	/* Update all local Writers associated with topic. */
	for (ep = tp->writers; ep; ep = ep->next)
		if ((ep->entity.flags & EF_LOCAL) != 0) {
			wp = (Writer_t *) ep;
#ifdef RW_LOCKS
			lock_take (wp->w_lock);
#endif
			disc_writer_update (tp->domain, wp, 1, 0);
#ifdef RW_LOCKS
			lock_release (wp->w_lock);
#endif
		}

    done:
	lock_release (tp->lock);
	return (ret);
}
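A usage sketch for the branch at the top of the function: passing DDS_TOPIC_QOS_DEFAULT resets the topic to the domain's default topic QoS. The wrapper name and the `topic` argument are hypothetical; the constants come from the example above.

static int reset_topic_qos (DDS_Topic topic)
{
	/* DDS_TOPIC_QOS_DEFAULT makes the call use tp->domain->def_topic_qos,
	   as shown at the start of DDS_Topic_set_qos () above. */
	return (DDS_Topic_set_qos (topic, DDS_TOPIC_QOS_DEFAULT) == DDS_RETCODE_OK);
}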
Example no. 5
0
void ctt_assert (Domain_t *dp)
{
	Reader_t	*rp;
	Writer_t	*wp;

	rp = (Reader_t *) dp->participant.p_builtin_ep [EPB_PARTICIPANT_VOL_SEC_R];
	lock_take (rp->r_lock);
	assert (rtps_endpoint_assert (&rp->r_lep) == DDS_RETCODE_OK);
	lock_release (rp->r_lock);
	wp = (Writer_t *) dp->participant.p_builtin_ep [EPB_PARTICIPANT_VOL_SEC_W];
	lock_take (wp->w_lock);
	assert (rtps_endpoint_assert (&wp->w_lep) == DDS_RETCODE_OK);
	lock_release (wp->w_lock);
}
Example no. 6
0
void sock_fd_fct_socket (int fd, RSDATAFCT fct)
{
	unsigned	i, n;

	lock_take (sock_lock);
	n = atomic_get_w (num_fds);
	for (i = 0; i < n; i++)
		if ((*fds) [i].fd == fd) {
			lock_take (poll_lock);
			(*fcts) [i] = fct;
			lock_release (poll_lock);
			break;
		}
	lock_release (sock_lock);
}
Example no. 7
0
void sock_fd_udata_socket (int fd, void *udata)
{
	unsigned	i, n;

	lock_take (sock_lock);
	n = atomic_get_w (num_fds);
	for (i = 0; i < n; i++)
		if ((*fds) [i].fd == fd) {
			lock_take (poll_lock);
			(*ud) [i] = udata;
			lock_release (poll_lock);
			break;
		}
	lock_release (sock_lock);
}
Example no. 8
0
void sock_fd_remove_handle (HANDLE h)
{
	unsigned	i;
	SockHandle_t	*hp;

	lock_take (sock_lock);
	for (i = 0, hp = &(*handles) [0]; i < num_handles; i++, hp++)
		if (hp->handle == h) {
			if (hp->index + 1 < nhandles) {
				memmove (&whandles [hp->index],
					 &whandles [hp->index + 1],
					 (nhandles - i - 1) *
					 sizeof (HANDLE));
				memmove (&wsock [hp->index],
					 &wsock [hp->index + 1],
					 (nhandles - i - 1) *
					 sizeof (Sock_t *));
			}
			nhandles--;
			if (i + 1 < num_handles)
				memmove (&(*handles) [i],
					 &(*handles) [i + 1],
					 (num_handles - i - 1) *
					 sizeof (SockHandle_t));
			num_handles--;
			break;
		}
	lock_release (sock_lock);
}
Example no. 9
0
int dds_cond_broadcast (cond_t *cv)
{
	int	have_waiters, res;

	/* Ensure that waiters and was_broadcast are consistent. */
	lock_take (cv->waiters_lock);
	have_waiters = 0;
	if (cv->waiters > 0) {
		cv->was_broadcast = 1;
		have_waiters = 1;
	}
	lock_release (cv->waiters_lock);
	res = DDS_RETCODE_OK;
	if (have_waiters) {

		/* Wake up all the waiters. */
		if (!sema_release (cv->sema))
			res = DDS_RETCODE_ERROR;

		/* Wait for all the awakened threads to acquire their part of
		   the counting semaphore. */
		else if (!ev_wait (cv->waiters_done))
		        res = DDS_RETCODE_ERROR;

		cv->was_broadcast = 0;
	}
	return (res);
}
Example no. 10
0
int evt_trigger(spdid_t spdid, long extern_evt)
{
	struct evt *e;
	int ret = 0;

	lock_take(&evt_lock);

	e = mapping_find(extern_evt);
	if (NULL == e) goto err;

	ACT_RECORD(ACT_TRIGGER, spdid, e->extern_id, cos_get_thd_id(), 0);
	/* Trigger an event being waited for? */
	if (0 != (ret = __evt_trigger(e))) {
		lock_release(&evt_lock);
		ACT_RECORD(ACT_WAKEUP, spdid, e->extern_id, cos_get_thd_id(), ret);
		if (sched_wakeup(cos_spd_id(), ret)) BUG();
	} else {
		lock_release(&evt_lock);
	}

	return 0;
err:
	lock_release(&evt_lock);
	return -1;
}
Example no. 11
0
/* Wait for a specific event */
int evt_wait(spdid_t spdid, long extern_evt)
{
	struct evt *e;

	while (1) {
		int ret;

		lock_take(&evt_lock);
		e = mapping_find(extern_evt);
		if (NULL == e) goto err;
		if (0 > (ret = __evt_read(e))) goto err;
		ACT_RECORD(ACT_WAIT, spdid, e->extern_id, cos_get_thd_id(), 0);
		lock_release(&evt_lock);
		if (1 == ret) {
			assert(extern_evt == e->extern_id);
			return 0;
		} else {
			ACT_RECORD(ACT_SLEEP, spdid, e->extern_id, cos_get_thd_id(), 0);
			if (0 > sched_block(cos_spd_id(), 0)) BUG();
		}
	}
err:
	lock_release(&evt_lock);
	return -1; 
}
Example no. 12
0
/* Wait on a group of events (like epoll) */
long evt_grp_wait(spdid_t spdid)
{
	struct evt_grp *g;
	struct evt *e = NULL;
	long extern_evt;

	while (1) {
		lock_take(&evt_lock);

		g = evt_grp_find(cos_get_thd_id());
		ACT_RECORD(ACT_WAIT_GRP, spdid, e ? e->extern_id : 0, cos_get_thd_id(), 0);
		if (NULL == g) goto err;
		if (__evt_grp_read(g, &e)) goto err;

		if (NULL != e) {
			extern_evt = e->extern_id;
			lock_release(&evt_lock);
			return extern_evt;
		} else {
			lock_release(&evt_lock);
			ACT_RECORD(ACT_SLEEP, spdid, 0, cos_get_thd_id(), 0);
			if (0 > sched_block(cos_spd_id(), 0)) BUG();
		}
	}
err:
	lock_release(&evt_lock);
	return -1; 
}
Example no. 13
0
/* 
 * FIXME: keeping the lock during a bunch of memory allocation.  This
 * is never good, but the code is much simpler for it.  A trade-off
 * I'm commonly making now.
 */
long evt_create(spdid_t spdid)
{
	u16_t tid = cos_get_thd_id();
	struct evt_grp *g;
	struct evt *e;
	int ret = -ENOMEM;

	lock_take(&evt_lock);
	g = evt_grp_find(tid);
	/* If the group associated with this thread hasn't been
	 * created yet. */
	if (!g) {
		g = evt_grp_create(spdid, tid);
		if (NULL == g) goto err;
		e = __evt_new(g);
		if (NULL == e) {
			evt_grp_free(g);
			goto err;
		}
		evt_grp_add(g);
	} else {
		e = __evt_new(g);
		if (NULL == e) goto err;
	}
	e->extern_id = mapping_create(e);
	if (0 > e->extern_id) goto free_evt_err;
	ret = e->extern_id;
done:
	lock_release(&evt_lock);
	return ret;
free_evt_err:
	__evt_free(e);
err:
	goto done;
}
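A lifecycle sketch tying the event calls together: evt_create allocates an id, evt_wait (example no. 11) blocks until another thread calls evt_trigger (example no. 10) on that id, and evt_free (example no. 28) releases the mapping. The wrapper is hypothetical and assumes the caller passes its own component id obtained via cos_spd_id(), as the examples themselves do; errors are reduced to BUG() in the same style.

static void evt_roundtrip (void)
{
	spdid_t	spdid = cos_spd_id ();
	long	evt;

	evt = evt_create (spdid);	/* allocate an event id (negative on error) */
	if (evt < 0) BUG ();

	/* Another thread eventually calls evt_trigger () with this id;
	   evt_wait () blocks the current thread until that happens. */
	if (evt_wait (spdid, evt)) BUG ();

	evt_free (spdid, evt);		/* drop the id -> event mapping */
}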
Example no. 14
0
void t_dump (void)
{
	Topic	*tp;

	if (LIST_EMPTY (topics)) {
		printf ("No topics discovered.\r\n");
		return;
	}
	printf ("Active   #rd    #wr   #msgs   #disp   #no_w   Topic\r\n");
	printf ("------   ---    ---   -----   -----   -----   -----\r\n");
	lock_take (topic_lock);
	LIST_FOREACH (topics, tp) {
		if (tp->active)
			printf ("   *  ");
		else
			printf ("      ");
		printf ("%6u %6u %7lu %7lu %7lu   %s/%s\r\n", 
					     nendpoints (tp->writers),
					     nendpoints (tp->readers),
					     tp->ndata,
					     tp->ndispose,
					     tp->nnowriter,
					     tp->topic_name,
					     tp->type_name);
	}
	lock_release (topic_lock);
}
Example no. 15
0
void sock_fd_remove_socket (SOCKET s)
{
	unsigned	i;
	SockSocket_t	*sp;

	lock_take (sock_lock);
	for (i = 0, sp = &(*sockets) [0]; i < num_socks; i++, sp++)
		if (sp->socket == s) {
			WSACloseEvent (sp->handle);
			if (sp->index + 1 < nhandles) {
				memmove (&whandles [sp->index],
					 &whandles [sp->index + 1],
					 (nhandles - i - 1) *
					 sizeof (HANDLE));
				memmove (&wsock [sp->index],
					 &wsock [sp->index + 1],
					 (nhandles - i - 1) *
					 sizeof (Sock_t *));
			}
			nhandles--;
			if (i + 1 < num_socks)
				memmove (&(*sockets) [i],
					 &(*sockets) [i + 1],
					 (num_socks - i - 1) *
					 sizeof (SockSocket_t));

			num_socks--;
			break;
		}
	lock_release (sock_lock);
}
Example no. 16
0
static void *alloc_rb_buff(rb_meta_t *r)
{
	struct buff_page *p;
	int i;
	void *ret = NULL;

	lock_take(&r->l);
	if (EMPTY_LIST(&r->avail_pages, next, prev)) {
		if (NULL == (p = alloc_buff_page())) {
			lock_release(&r->l);
			return NULL;
		}
		ADD_LIST(&r->avail_pages, p, next, prev);
	}
	p = FIRST_LIST(&r->avail_pages, next, prev);
	assert(p->amnt_buffs < NP_NUM_BUFFS);
	for (i = 0 ; i < NP_NUM_BUFFS ; i++) {
		if (p->buff_used[i] == 0) {
			p->buff_used[i] = 1;
			ret = p->buffs[i];
			p->amnt_buffs++;
			break;
		}
	}
	assert(NULL != ret);
	if (p->amnt_buffs == NP_NUM_BUFFS) {
		REM_LIST(p, next, prev);
		ADD_LIST(&r->used_pages, p, next, prev);
	}
	lock_release(&r->l);
	return ret;
}
Example no. 17
0
int __evt_wait(spdid_t spdid, long extern_evt, int n)
{
    struct evt *e;

    while (1) {
        int ret;

        lock_take(&evt_lock);
        e = mapping_find(extern_evt);
        if (NULL == e) goto err;
        if (0 > (ret = __evt_read(e))) goto err;
        ACT_RECORD(ACT_WAIT, spdid, e->extern_id, cos_get_thd_id(), 0);
        e->n_wait = n;
        e->core_id = cos_cpuid();
        if (ret == 1) e->n_received = 0;
        lock_release(&evt_lock);
        if (1 == ret) {
            assert(extern_evt == e->extern_id);
            return 0;
        } else {
            ACT_RECORD(ACT_SLEEP, spdid, e->extern_id, cos_get_thd_id(), 0);

            /* We can use acaps to block / wakeup, which
             * can avoid calling scheduler. But it's like
             * a hack. */

            if (0 > sched_block(cos_spd_id(), 0)) BUG();
        }
    }

err:
    lock_release(&evt_lock);
    return -1;
}
Example no. 18
0
DDS_ReturnCode_t DDS_DataWriter_set_qos (DDS_DataWriter wp,
					 DDS_DataWriterQos *qos)
{
	DDS_ReturnCode_t ret;

	ctrc_begind (DCPS_ID, DCPS_DW_S_QOS, &wp, sizeof (wp));
	ctrc_contd (&qos, sizeof (qos));
	ctrc_endd ();

	if (!writer_ptr (wp, 1, &ret))
		return (ret);

	if (qos == DDS_DATAWRITER_QOS_DEFAULT)
		qos = &wp->w_publisher->def_writer_qos;
	else if (!qos_valid_writer_qos (qos)) {
		ret = DDS_RETCODE_BAD_PARAMETER;
		goto done;
	}
	ret = qos_writer_update (&wp->w_qos, qos);

    done:
	lock_release (wp->w_lock);

	if (ret)
		return (ret);

	lock_take (wp->w_topic->domain->lock);
	dcps_update_writer_qos (NULL, &wp, wp->w_publisher);
	lock_release (wp->w_topic->domain->lock);

	return (ret);
}
Example no. 19
0
static int sedp_subscription_update (Domain_t *dp, Reader_t *rp)
{
	GUID_t		guid;
	Writer_t	*sw;
	Topic_t		*tp;
	FilteredTopic_t	*ftp;
	HCI		hci;
	InstanceHandle	handle;
	DDS_HANDLE	endpoint;
	FTime_t		time;
	int		error;

	/* Derive key and publication endpoint. */
	guid.prefix = dp->participant.p_guid_prefix;
	guid.entity_id = rp->r_entity_id;

#if defined (DDS_SECURITY) && defined (DDS_NATIVE_SECURITY)
	if (SECURE_DISCOVERY (dp, rp->r_disc_prot))
		sw = (Writer_t *) dp->participant.p_builtin_ep [EPB_SUBSCRIPTION_SEC_W];
	else
#endif
		sw = (Writer_t *) dp->participant.p_builtin_ep [EPB_SUBSCRIPTION_W];
	if (!sw)
		return (DDS_RETCODE_ALREADY_DELETED);

	/* Lookup instance. */
	lock_take (sw->w_lock);
	hci = hc_lookup_key (sw->w_cache, (unsigned char *) &guid,
							sizeof (guid), &handle);
	if (!hci) {
		warn_printf ("sedp_subscription_update: failed to lookup instance handle!");
		lock_release (sw->w_lock);
		return (DDS_RETCODE_ALREADY_DELETED);
	}

	/* Write subscription data. */
	tp = rp->r_topic;
	if ((tp->entity.flags & EF_FILTERED) != 0) {
		ftp = (FilteredTopic_t *) tp;
		tp = ftp->related;
	}
	if (sedp_log)
		log_printf (SEDP_ID, 0, "SEDP: Resend %ssubscription (%s/%s)\r\n",
#if defined (DDS_SECURITY) && defined (DDS_NATIVE_SECURITY)
				(SECURE_DISCOVERY (dp, rp->r_disc_prot)) ? "secure " : 
#endif
									   "",
				str_ptr (tp->name), 
				str_ptr (tp->type->type_name));
	endpoint = rp->r_handle;
	sys_getftime (&time);
	error = rtps_writer_write (sw, &endpoint, sizeof (endpoint), handle,
							hci, &time, NULL, 0);
	lock_release (sw->w_lock);
	if (error)
		warn_printf ("sedp_subscription_update: write failure!");

	return (DDS_RETCODE_OK);
}
Example no. 20
0
int dds_cond_wait (cond_t *cv, HANDLE mutex)
{
	int	res, last_waiter;

	/* Prevent race conditions on the waiters count. */
	lock_take (cv->waiters_lock);
	cv->waiters++;
	lock_release (cv->waiters_lock);

	res = DDS_RETCODE_OK;

	/* We keep the lock held just long enough to increment the count of
	   waiters by one.  Note that we can't keep it held across the call
	   sema_wait() since that will deadlock other calls to cond_signal(). */
	if (lock_release (mutex))
		return (DDS_RETCODE_ERROR);

	/* Wait to be awakened by a cond_signal() or cond_signal_all(). */
	res = sema_take (cv->sema);

	/* Reacquire lock to avoid race conditions on the waiters count. */
	lock_take (cv->waiters_lock);

	/* We're ready to return, so there's one less waiter. */
	cv->waiters--;
	last_waiter = cv->was_broadcast && cv->waiters == 0;

	/* Release the lock so that other collaborating threads can make
	   progress. */
	lock_release (cv->waiters_lock);

	if (res)
		; /* Bad things happened, so let's just return. */

	/* If we're the last waiter thread during this particular broadcast
	   then let all the other threads proceed. */
	if (last_waiter)
		ev_signal (cv->waiters_done);

	/* We must always regain the external_mutex, even when errors
	   occur because that's the guarantee that we give to our callers. */
	lock_take (mutex);

	return (res);
}
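A monitor-style sketch of how dds_cond_wait and dds_cond_broadcast (example no. 9) are meant to be combined: the waiter holds the external mutex and re-checks its predicate in a loop, while the signaling side updates the predicate under the same mutex before broadcasting. All names are hypothetical; `data_lock` and `data_cv` are assumed to be initialized elsewhere with the platform's mutex/condition setup routines, which are not shown in these examples.

static HANDLE	data_lock;	/* external mutex passed to dds_cond_wait () */
static cond_t	data_cv;
static int	data_ready;	/* predicate protected by data_lock */

static void wait_for_data (void)
{
	lock_take (data_lock);
	while (!data_ready)			/* always re-check after wakeup */
		dds_cond_wait (&data_cv, data_lock);
	/* ... consume the protected state ... */
	lock_release (data_lock);
}

static void publish_data (void)
{
	lock_take (data_lock);
	data_ready = 1;
	lock_release (data_lock);
	dds_cond_broadcast (&data_cv);		/* wake all current waiters */
}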
Example no. 21
0
int sock_fd_add_socket (SOCKET s, short events, RSDATAFCT rx_fct, void *udata, const char *name)
{
	SockSocket_t	*sp;
	void		*p;
	unsigned	e;
	WSAEVENT	ev;

	lock_take (sock_lock);
	if ((ev = WSACreateEvent ()) == WSA_INVALID_EVENT) {
		lock_release (sock_lock);
		return (DDS_RETCODE_OUT_OF_RESOURCES);
	}
	if (num_socks == max_socks ||
	    nhandles >= MAXIMUM_WAIT_OBJECTS) {
		if (!max_socks ||
		    max_socks > MAX_SIZE ||
		    nhandles >= MAXIMUM_WAIT_OBJECTS) {
			WSACloseEvent (ev);
			lock_release (sock_lock);
			return (DDS_RETCODE_OUT_OF_RESOURCES);
		}
		max_socks += INC_SIZE;
		p = xrealloc (sockets, sizeof (SockSocket_t) * max_socks);
		if (!p)
			fatal_printf ("sock_fd_add_socket: can't realloc()!");

		sockets = p;
	}

	/*printf ("socket added: fd=%d, events=%d\n", s, events);*/
	sp = &(*sockets) [num_socks];
	sp->is_socket = 1;
	sp->index = nhandles;
	sp->socket = s;
	sp->events = events;
	sp->name = name;
	sp->handle = ev;
	e = 0;
	if ((events & POLLIN) != 0)
		e = FD_READ;
	if ((events & POLLPRI) != 0)
		e |= FD_OOB;
	if ((events & POLLOUT) != 0)
		e |= FD_WRITE;
	if ((events & POLLHUP) != 0)
		e |= FD_CLOSE;
	if (WSAEventSelect (s, ev, e))
		fatal_printf ("sock_fd_add_socket(): WSAEventSelect() failed - error = %d", WSAGetLastError ());

	sp->sfct = rx_fct;
	sp->udata = udata;
	num_socks++;
	whandles [nhandles] = ev;
	wsock [nhandles++] = (Sock_t *) sp;
	lock_release (sock_lock);
	return (DDS_RETCODE_OK);
}
Example no. 22
0
void sock_fd_event_socket (int fd, short events, int set)
{
	unsigned	i, n;

	lock_take (sock_lock);
	n = atomic_get_w (num_fds);
	for (i = 0; i < n; i++)
		if ((*fds) [i].fd == fd) {
			lock_take (poll_lock);
			if (set)
				(*fds) [i].events |= events;
			else
				(*fds) [i].events &= ~events;
			lock_release (poll_lock);
			break;
		}
	lock_release (sock_lock);
}
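A small sketch of the set/clear semantics visible in the function above: a non-zero `set` ORs the events into the poll entry, zero masks them out. The helper names are hypothetical and assume the descriptor was registered earlier with sock_fd_add() and that poll.h is in scope for POLLOUT.

static void tx_poll_enable (int fd)
{
	sock_fd_event_socket (fd, POLLOUT, 1);	/* set != 0: events |= POLLOUT */
}

static void tx_poll_disable (int fd)
{
	sock_fd_event_socket (fd, POLLOUT, 0);	/* set == 0: events &= ~POLLOUT */
}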
Example no. 23
0
DDS_ReturnCode_t DDS_SP_update_start (void)
{
	msp_init ();
	input_counter = 1;

	/* Take access db lock */
	lock_init ();
	lock_take (sp_lock);
	return (DDS_RETCODE_OK);
}
Example no. 24
0
LocatorNode_t *locator_new (LocatorKind_t       kind,
			    const unsigned char *addr,
			    uint32_t            port)
{
	LocatorNode_t	*np;

	lock_take (loc_lock);
	np = locator_new_node (kind, addr, port);	
	lock_release (loc_lock);
	return (np);
}
Example no. 25
0
DDS_ReturnCode_t DDS_DataWriter_enable (DDS_DataWriter wp)
{
	Topic_t			*tp;
	DDS_ReturnCode_t	ret;

	ctrc_printd (DCPS_ID, DCPS_DW_ENABLE, &wp, sizeof (wp));

	if (!writer_ptr (wp, 0, &ret))
		return (ret);

	tp = wp->w_topic;
	lock_take (tp->domain->lock);
	lock_take (tp->lock);
	if ((tp->entity.flags & EF_ENABLED) == 0 ||
	    (wp->w_publisher->entity.flags & EF_ENABLED) == 0) {
		lock_release (tp->domain->lock);
		lock_release (tp->lock);
		return (DDS_RETCODE_NOT_ENABLED);
	}
#ifdef RW_LOCKS
	lock_take (wp->w_lock);
#endif
	if ((wp->w_flags & EF_ENABLED) == 0) {

		/* Deliver new publication endpoint to the Discovery subsystem. */
		wp->w_flags |= EF_ENABLED | EF_NOT_IGNORED;
		hc_enable (wp->w_cache);
		if ((wp->w_publisher->entity.flags & EF_SUSPEND) != 0)
			dcps_suspended_publication_add (wp->w_publisher, wp, 1);
		else
			disc_writer_add (wp->w_publisher->domain, wp);

	}
#ifdef RW_LOCKS
	lock_release (wp->w_lock);
#endif
	lock_release (tp->lock);
	lock_release (tp->domain->lock);
	return (DDS_RETCODE_OK);
}
Example no. 26
0
void sock_fd_dump (void)
{
	unsigned	i, n;

	lock_take (sock_lock);
	n = atomic_get_w (num_fds);
	for (i = 0; i < n; i++) {
		dbg_printf ("%d: [%d] %s {%s} -> ", i, (*fds) [i].fd, (*names) [i], dbg_poll_event_str ((*fds) [i].events));
		dbg_printf ("{%s} ", dbg_poll_event_str ((*fds) [i].events));
		dbg_printf ("Rxfct=0x%lx, U=%p\r\n", (unsigned long) ((*fcts) [i]), (*ud) [i]);
	}
	lock_release (sock_lock);
}
Example no. 27
0
void sock_fd_poll (unsigned poll_time)
{
	struct pollfd	*iop;
	unsigned	i, n;

	lock_take (poll_lock);
	n = atomic_get_w (num_fds);
	/*printf ("*"); fflush (stdout);*/
	n_ready = poll (*fds, n, poll_time);
	lock_release (poll_lock);
	if (n_ready < 0) {
		log_printf (LOG_DEF_ID, 0, "sock_fd_poll: poll() returned error: %s\r\n", strerror (errno));
		return;
	}
	else if (!n_ready) {
		/* avoid starvation of other threads waiting on poll_lock.
		 * These other threads always hold sock_lock. so locking and unlocking
		 * sock_lock here, gives the proper synchronization.
		 * In case no one is waiting, this is a waste of time, but without a
		 * rewrite this is not solvable.
		 */
		lock_take (sock_lock);
		lock_release (sock_lock);
		return;
	}

	lock_take (sock_lock);
	n = atomic_get_w (num_fds);
	for (i = 0, iop = *fds; i < n; i++, iop++) {
		if (iop->revents) {
			/*dbg_printf ("sock: %u %d=0x%04x->0x%04x\r\n", i, iop->fd, iop->events, iop->revents);*/
			dds_lock_ev ();
			dds_ev_pending |= DDS_EV_IO;
			dds_unlock_ev ();
			break;
		}
	}
	lock_release (sock_lock);
}
Example no. 28
0
void evt_free(spdid_t spdid, long extern_evt)
{
	struct evt *e;

	lock_take(&evt_lock);
	e = mapping_find(extern_evt);
	if (NULL == e) goto done;
	__evt_free(e);
	mapping_free(extern_evt);
done:
	lock_release(&evt_lock);
	return;
}
Example no. 29
0
int sock_fd_add (int fd, short events, RSDATAFCT rx_fct, void *udata, const char *name)
{
	unsigned	n;

	if (!max_fds)
		sock_fd_init ();
	lock_take (sock_lock);
	n = atomic_get_w (num_fds);
	if (n == atomic_get_w (max_fds)) {
		if (max_fds >= fd_max_size) {
			lock_release (sock_lock);
			return (DDS_RETCODE_OUT_OF_RESOURCES);
		}
		lock_take (poll_lock);
		atomic_add_w (max_fds, FD_INC_SIZE);
#if defined (NUTTX_RTOS)
		fds = realloc (fds, sizeof (struct pollfd) * max_fds);
		fcts = realloc (fcts, sizeof (RSDATAFCT) * max_fds);
		ud = realloc (ud, sizeof (void *) * max_fds);
		names = realloc (names, sizeof (char *) * max_fds);
#else
		fds = xrealloc (fds, sizeof (struct pollfd) * max_fds);
		fcts = xrealloc (fcts, sizeof (RSDATAFCT) * max_fds);
		ud = xrealloc (ud, sizeof (void *) * max_fds);
		names = xrealloc (names, sizeof (char *) * max_fds);
#endif
		lock_release (poll_lock);
		if (!fds || !fcts || !ud || !names)
			fatal_printf ("sock_fd_add: can't realloc()!");
	}
	/*printf ("socket added: fd=%d, events=%d\n", fd, events);*/
	(*fds) [n].fd = fd;
	(*fds) [n].events = events;
	(*fds) [n].revents = 0;
	(*fcts) [n] = rx_fct;
	(*ud) [n] = udata;
	(*names) [n] = name;
	atomic_inc_w (num_fds);
	lock_release (sock_lock);
	return (DDS_RETCODE_OK);
}
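A sketch of how the pieces fit together: register a descriptor with sock_fd_add(), then repeatedly poll and dispatch with sock_fd_poll() (example no. 27) and sock_fd_schedule() (example no. 2). The callback signature is an assumption derived from the (*fct) (fd, events, user) call in sock_fd_schedule; `my_fd`, the function names, and the 100 ms timeout are hypothetical.

static void my_rx (int fd, short revents, void *udata)
{
	/* ... read from fd and hand the data to udata's owner ... */
}

static void io_loop (int my_fd)
{
	if (sock_fd_add (my_fd, POLLIN, my_rx, NULL, "my_fd") != DDS_RETCODE_OK)
		return;

	for (;;) {
		sock_fd_poll (100);	/* wait up to 100 ms (passed straight to poll ()) */
		sock_fd_schedule ();	/* invoke callbacks for descriptors with revents set */
	}
}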
Example no. 30
0
static int sedp_publication_update (Domain_t *dp, Writer_t *wp)
{
	GUID_t		guid;
	Writer_t	*pw;
	HCI		hci;
	InstanceHandle	handle;
	DDS_HANDLE	endpoint;
	FTime_t		time;
	int		error;

	/* Derive key and publication endpoint. */
	guid.prefix = dp->participant.p_guid_prefix;
	guid.entity_id = wp->w_entity_id;
#if defined (DDS_SECURITY) && defined (DDS_NATIVE_SECURITY)
	if (SECURE_DISCOVERY (dp, wp->w_disc_prot))
		pw = (Writer_t *) dp->participant.p_builtin_ep [EPB_PUBLICATION_SEC_W];
	else
#endif
		pw = (Writer_t *) dp->participant.p_builtin_ep [EPB_PUBLICATION_W];
	if (!pw)
		return (DDS_RETCODE_ALREADY_DELETED);

	/* Lookup instance. */
	lock_take (pw->w_lock);
	hci = hc_lookup_key (pw->w_cache, (unsigned char *) &guid,
							sizeof (guid), &handle);
	if (!hci) {
		warn_printf ("sedp_publication_update: failed to lookup instance handle!");
		lock_release (pw->w_lock);
		return (DDS_RETCODE_ALREADY_DELETED);
	}

	/* Write publication data. */
	if (sedp_log)
		log_printf (SEDP_ID, 0, "SEDP: Resend %spublication (%s/%s)\r\n",
#if defined (DDS_SECURITY) && defined (DDS_NATIVE_SECURITY)
				(SECURE_DISCOVERY (dp, wp->w_disc_prot)) ? "secure " : 
#endif
									   "",
				str_ptr (wp->w_topic->name), 
				str_ptr (wp->w_topic->type->type_name));
	endpoint = wp->w_handle;
	sys_getftime (&time);
	error = rtps_writer_write (pw, &endpoint, sizeof (endpoint), handle,
							hci, &time, NULL, 0);
	lock_release (pw->w_lock);
	if (error)
		warn_printf ("sedp_publication_update: write failure!");

	return (error);
}