コード例 #1
0
/** Timer fibril.
 *
 * Main loop of the timer fibril: sleeps until the timer is activated,
 * then waits out the programmed delay and invokes the callback.
 * Terminates when the timer enters the fts_cleanup state.
 *
 * @param arg	Timer (fibril_timer_t *)
 * @return	Always 0
 */
static int fibril_timer_func(void *arg)
{
	fibril_timer_t *timer = (fibril_timer_t *) arg;
	int rc;

	fibril_mutex_lock(&timer->lock);

	while (true) {
		/* Sleep until the timer is set or is being destroyed.
		 * (The loop condition already exits on fts_cleanup, so no
		 * extra check is needed inside the body.) */
		while (timer->state != fts_active &&
		    timer->state != fts_cleanup) {
			fibril_condvar_wait(&timer->cv, &timer->lock);
		}

		if (timer->state == fts_cleanup)
			break;

		/*
		 * Wait out the delay. If the wait is interrupted early
		 * (timer cleared or re-armed), loop around without firing;
		 * only a genuine timeout invokes the callback.
		 */
		rc = fibril_condvar_wait_timeout(&timer->cv, &timer->lock,
		    timer->delay);
		if (rc == ETIMEOUT) {
			timer->state = fts_fired;
			/* Drop the lock while running the user callback. */
			fibril_mutex_unlock(&timer->lock);
			timer->fun(timer->arg);
			fibril_mutex_lock(&timer->lock);
		}
	}

	fibril_mutex_unlock(&timer->lock);
	return 0;
}
コード例 #2
0
ファイル: assoc.c プロジェクト: fhector/helenOS-0.5-Hector
/** Get a received message.
 *
 * Pull one message from the association's receive queue.
 */
int udp_assoc_recv(udp_assoc_t *assoc, udp_msg_t **msg, udp_sock_t *fsock)
{
	log_msg(LVL_DEBUG, "udp_assoc_recv()");

	fibril_mutex_lock(&assoc->lock);

	/* Block until at least one message is queued. */
	while (list_empty(&assoc->rcv_queue)) {
		log_msg(LVL_DEBUG, "udp_assoc_recv() - waiting");
		fibril_condvar_wait(&assoc->rcv_queue_cv, &assoc->lock);
	}

	log_msg(LVL_DEBUG, "udp_assoc_recv() - got a message");

	/* Detach the oldest entry from the queue. */
	link_t *cur = list_first(&assoc->rcv_queue);
	udp_rcv_queue_entry_t *entry =
	    list_get_instance(cur, udp_rcv_queue_entry_t, link);
	list_remove(cur);
	fibril_mutex_unlock(&assoc->lock);

	/* Hand the message and its source socket to the caller;
	 * ownership of the message passes to the caller. */
	*msg = entry->msg;
	*fsock = entry->sp.foreign;
	free(entry);

	return EOK;
}
コード例 #3
0
ファイル: usbhub.c プロジェクト: fhector/helenOS-0.5-Hector
/**
 * Callback invoked from the hub polling fibril when the fibril terminates.
 *
 * Does not perform cleanup, just marks the hub as not running.
 * @param device usb device affected
 * @param was_error indicates that the fibril is stopped due to an error
 * @param data pointer to usb_hub_dev_t structure
 */
static void usb_hub_polling_terminated_callback(usb_device_t *device,
    bool was_error, void *data)
{
	usb_hub_dev_t *hub_dev = data;
	assert(hub_dev);

	fibril_mutex_lock(&hub_dev->pending_ops_mutex);

	/*
	 * The device is dead, but some operations may still be pending.
	 * A typical case is device addition in progress: its fibril is
	 * blocked waiting for a status change in the port reset (port
	 * enable) callback - a change that can no longer arrive. Fail
	 * all pending port resets so those fibrils can finish.
	 */
	if (hub_dev->pending_ops_count > 0) {
		for (size_t i = 0; i < hub_dev->port_count; ++i)
			usb_hub_port_reset_fail(&hub_dev->ports[i]);
	}

	/* Now wait until every pending operation has completed. */
	while (hub_dev->pending_ops_count > 0) {
		fibril_condvar_wait(&hub_dev->pending_ops_cv,
		    &hub_dev->pending_ops_mutex);
	}
	fibril_mutex_unlock(&hub_dev->pending_ops_mutex);
	hub_dev->running = false;
}
コード例 #4
0
/** RECEIVE user call
 *
 * Wait for data on the connection and copy up to @a size bytes of it
 * into @a buf.
 *
 * @param conn		Connection
 * @param buf		Destination buffer
 * @param size		Destination buffer size
 * @param rcvd		Place to store number of bytes actually received
 * @param xflags	Place to store transfer flags
 * @return		TCP error code
 */
tcp_error_t tcp_uc_receive(tcp_conn_t *conn, void *buf, size_t size,
    size_t *rcvd, xflags_t *xflags)
{
	log_msg(LOG_DEFAULT, LVL_DEBUG, "%s: tcp_uc_receive()", conn->name);

	fibril_mutex_lock(&conn->lock);

	if (conn->cstate == st_closed) {
		fibril_mutex_unlock(&conn->lock);
		return TCP_ENOTEXIST;
	}

	/* Wait for data to become available */
	while (conn->rcv_buf_used == 0 && !conn->rcv_buf_fin && !conn->reset) {
		log_msg(LOG_DEFAULT, LVL_DEBUG, "tcp_uc_receive() - wait for data");
		fibril_condvar_wait(&conn->rcv_buf_cv, &conn->lock);
	}

	if (conn->rcv_buf_used == 0) {
		/* No data - we were woken by FIN or reset. */
		*rcvd = 0;
		*xflags = 0;

		tcp_error_t result;
		if (conn->rcv_buf_fin) {
			/* End of data, peer closed connection */
			result = TCP_ECLOSING;
		} else {
			/* Connection was reset */
			assert(conn->reset);
			result = TCP_ERESET;
		}

		fibril_mutex_unlock(&conn->lock);
		return result;
	}

	/* Copy data from receive buffer to user buffer */
	size_t nbytes = min(size, conn->rcv_buf_used);
	memcpy(buf, conn->rcv_buf, nbytes);
	*rcvd = nbytes;

	/* Shift the remaining data to the front of the receive buffer */
	memmove(conn->rcv_buf, conn->rcv_buf + nbytes,
	    conn->rcv_buf_used - nbytes);
	conn->rcv_buf_used -= nbytes;
	conn->rcv_wnd += nbytes;

	/* TODO */
	*xflags = 0;

	/* Announce the enlarged receive window to the peer */
	tcp_tqueue_ctrl_seg(conn, CTL_ACK);

	log_msg(LOG_DEFAULT, LVL_DEBUG, "%s: tcp_uc_receive() - returning %zu bytes",
	    conn->name, nbytes);

	fibril_mutex_unlock(&conn->lock);

	return TCP_EOK;
}
コード例 #5
0
ファイル: endpoint.c プロジェクト: fhector/helenOS-0.5-Hector
/** Mark the endpoint as active and block access for further fibrils.
 * @param instance endpoint_t structure.
 */
void endpoint_use(endpoint_t *instance)
{
	assert(instance);
	fibril_mutex_lock(&instance->guard);
	/* Sleep until the current user releases the endpoint. */
	while (instance->active) {
		fibril_condvar_wait(&instance->avail, &instance->guard);
	}
	/* Claim the endpoint for ourselves. */
	instance->active = true;
	fibril_mutex_unlock(&instance->guard);
}
コード例 #6
0
ファイル: ucall.c プロジェクト: jvesely/helenos
/** SEND user call
 *
 * Queue @a size bytes from @a data for transmission on the connection,
 * blocking while the send buffer is full.
 *
 * @param conn	Connection
 * @param data	Data to send
 * @param size	Size of data in bytes
 * @param flags	Transfer flags (currently unused)
 * @return	TCP error code
 */
tcp_error_t tcp_uc_send(tcp_conn_t *conn, void *data, size_t size,
    xflags_t flags)
{
	size_t buf_free;
	size_t xfer_size;
	/* Byte cursor into the caller's data; arithmetic on void *
	 * is a GNU extension, so use char * for portability. */
	char *dp = data;

	log_msg(LOG_DEFAULT, LVL_DEBUG, "%s: tcp_uc_send()", conn->name);

	tcp_conn_lock(conn);

	if (conn->cstate == st_closed) {
		tcp_conn_unlock(conn);
		return TCP_ENOTEXIST;
	}

	if (conn->cstate == st_listen) {
		/* Change connection to active */
		tcp_conn_sync(conn);
	}

	if (conn->snd_buf_fin) {
		/* FIN already queued - no more data may be sent. */
		tcp_conn_unlock(conn);
		return TCP_ECLOSING;
	}

	while (size > 0) {
		/* Wait for free space in the send buffer (or a reset). */
		buf_free = conn->snd_buf_size - conn->snd_buf_used;
		while (buf_free == 0 && !conn->reset) {
			log_msg(LOG_DEFAULT, LVL_DEBUG, "%s: buf_free == 0, waiting.",
			    conn->name);
			fibril_condvar_wait(&conn->snd_buf_cv, &conn->lock);
			buf_free = conn->snd_buf_size - conn->snd_buf_used;
		}

		if (conn->reset) {
			tcp_conn_unlock(conn);
			return TCP_ERESET;
		}

		xfer_size = min(size, buf_free);

		/* Copy data to buffer */
		memcpy(conn->snd_buf + conn->snd_buf_used, dp, xfer_size);
		dp += xfer_size;
		conn->snd_buf_used += xfer_size;
		size -= xfer_size;

		/* Hand the newly buffered data to the transmit queue. */
		tcp_tqueue_new_data(conn);
	}

	tcp_tqueue_new_data(conn);
	tcp_conn_unlock(conn);

	return TCP_EOK;
}
コード例 #7
0
ファイル: ucall.c プロジェクト: jvesely/helenos
/** OPEN user call
 *
 * Create a new connection and, unless tcp_open_nonblock is given,
 * block until it is established or reset.
 *
 * @param epp		Endpoint pair
 * @param acpass	Active/passive
 * @param oflags	Open flags
 * @param conn		Connection
 *
 * Unlike in the spec we allow specifying the local address. This means
 * the implementation does not need to magically guess it, especially
 * considering there can be more than one local address.
 *
 * XXX We should be able to call active open on an existing listening
 * connection.
 * XXX We should be able to get connection structure immediately, before
 * establishment.
 */
tcp_error_t tcp_uc_open(inet_ep2_t *epp, acpass_t acpass,
    tcp_open_flags_t oflags, tcp_conn_t **conn)
{
	tcp_conn_t *nconn;
	int rc;

	log_msg(LOG_DEFAULT, LVL_DEBUG, "tcp_uc_open(%p, %s, %s, %p)",
	    epp, acpass == ap_active ? "active" : "passive",
	    oflags == tcp_open_nonblock ? "nonblock" : "none", conn);

	/* NOTE(review): nconn is not checked for NULL before being passed
	 * to tcp_conn_add() - verify tcp_conn_new() cannot fail or that
	 * the callee tolerates NULL. */
	nconn = tcp_conn_new(epp);
	rc = tcp_conn_add(nconn);
	if (rc != EOK) {
		tcp_conn_delete(nconn);
		return TCP_EEXISTS;
	}

	tcp_conn_lock(nconn);

	if (acpass == ap_active) {
		/* Synchronize (initiate) connection */
		tcp_conn_sync(nconn);
	}

	if (oflags == tcp_open_nonblock) {
		/* Non-blocking open: hand out the connection immediately,
		 * before establishment. */
		tcp_conn_unlock(nconn);
		log_msg(LOG_DEFAULT, LVL_DEBUG, "tcp_uc_open -> %p", nconn);
		*conn = nconn;
		return TCP_EOK;
	}

	/* Wait for connection to be established or reset */
	log_msg(LOG_DEFAULT, LVL_DEBUG, "tcp_uc_open: Wait for connection.");
	while (nconn->cstate == st_listen ||
	    nconn->cstate == st_syn_sent ||
	    nconn->cstate == st_syn_received) {
		fibril_condvar_wait(&nconn->cstate_cv, &nconn->lock);
	}

	if (nconn->cstate != st_established) {
		/* The connection did not reach st_established - it must
		 * have been reset and closed. */
		log_msg(LOG_DEFAULT, LVL_DEBUG, "tcp_uc_open: Connection was reset.");
		assert(nconn->cstate == st_closed);
		tcp_conn_unlock(nconn);
		return TCP_ERESET;
	}

	tcp_conn_unlock(nconn);
	log_msg(LOG_DEFAULT, LVL_DEBUG, "tcp_uc_open: Connection was established.");

	*conn = nconn;
	log_msg(LOG_DEFAULT, LVL_DEBUG, "tcp_uc_open -> %p", nconn);
	return TCP_EOK;
}
コード例 #8
0
ファイル: udp.c プロジェクト: jvesely/helenos
/** Destroy UDP client instance.
 *
 * @param udp UDP client
 */
void udp_destroy(udp_t *udp)
{
	if (!udp)
		return;

	/* Hang up the session to the UDP service. */
	async_hangup(udp->sess);

	/* Wait until the callback handler has finished before freeing
	 * the instance it works with. */
	fibril_mutex_lock(&udp->lock);
	while (!udp->cb_done) {
		fibril_condvar_wait(&udp->cv, &udp->lock);
	}
	fibril_mutex_unlock(&udp->lock);

	free(udp);
}
コード例 #9
0
ファイル: prodcons.c プロジェクト: fhector/helenOS-0.5-Hector
/** Consume one item from a producer-consumer queue.
 *
 * Block until an item is available, then detach and return it.
 *
 * @param pc Producer-consumer queue
 * @return Detached list link of the consumed item
 */
link_t *prodcons_consume(prodcons_t *pc)
{
	fibril_mutex_lock(&pc->mtx);

	/* Sleep until a producer enqueues something. */
	while (list_empty(&pc->list)) {
		fibril_condvar_wait(&pc->cv, &pc->mtx);
	}

	link_t *item = list_first(&pc->list);
	list_remove(item);

	fibril_mutex_unlock(&pc->mtx);

	return item;
}
コード例 #10
0
ファイル: vfs_file.c プロジェクト: jvesely/helenos
int vfs_wait_handle_internal(void)
{
	vfs_client_data_t *vfs_data = VFS_DATA;	
	int fd;
	
	fibril_mutex_lock(&vfs_data->lock);
	while (list_empty(&vfs_data->passed_handles))
		fibril_condvar_wait(&vfs_data->cv, &vfs_data->lock);
	link_t *lnk = list_first(&vfs_data->passed_handles);
	list_remove(lnk);
	fibril_mutex_unlock(&vfs_data->lock);

	vfs_boxed_handle_t *bh = list_get_instance(lnk, vfs_boxed_handle_t, link);
	fd = bh->handle;
	free(bh);

	return fd;
}
コード例 #11
0
ファイル: sock.c プロジェクト: fhector/helenOS-0.5-Hector
/** Fibril that pumps data from a TCP connection into the socket's
 * receive buffer.
 *
 * Repeatedly waits until the buffer has been drained by the client,
 * refills it via tcp_uc_receive(), and broadcasts recv_buffer_cv.
 * Terminates (and deletes the connection) when tcp_uc_receive()
 * reports an error.
 *
 * @param arg	Socket (tcp_sockdata_t *)
 * @return	Always 0
 */
static int tcp_sock_recv_fibril(void *arg)
{
	tcp_sockdata_t *sock = (tcp_sockdata_t *)arg;
	size_t data_len;
	xflags_t xflags;
	tcp_error_t trc;

	log_msg(LVL_DEBUG, "tcp_sock_recv_fibril()");

	fibril_mutex_lock(&sock->recv_buffer_lock);

	while (true) {
		log_msg(LVL_DEBUG, "call tcp_uc_receive()");
		/* Wait until the previous data has been consumed, as long
		 * as the socket core still exists. */
		while (sock->recv_buffer_used != 0 && sock->sock_core != NULL)
			fibril_condvar_wait(&sock->recv_buffer_cv,
			    &sock->recv_buffer_lock);

		/* NOTE(review): recv_buffer_lock is held across this
		 * blocking call; tcp_uc_receive() synchronizes on the
		 * connection's own lock. */
		trc = tcp_uc_receive(sock->conn, sock->recv_buffer,
		    TCP_SOCK_FRAGMENT_SIZE, &data_len, &xflags);

		if (trc != TCP_EOK) {
			/* Record the error and wake any readers so they can
			 * observe it, then terminate the fibril. */
			sock->recv_error = trc;
			fibril_condvar_broadcast(&sock->recv_buffer_cv);
			if (sock->sock_core != NULL)
				tcp_sock_notify_data(sock->sock_core);
			break;
		}

		log_msg(LVL_DEBUG, "got data - broadcast recv_buffer_cv");

		/* Publish the freshly received data and wake readers. */
		sock->recv_buffer_used = data_len;
		fibril_condvar_broadcast(&sock->recv_buffer_cv);
		if (sock->sock_core != NULL)
			tcp_sock_notify_data(sock->sock_core);
	}

	fibril_mutex_unlock(&sock->recv_buffer_lock);

	tcp_uc_delete(sock->conn);

	return 0;
}
コード例 #12
0
ファイル: sock.c プロジェクト: fhector/helenOS-0.5-Hector
/** Fibril that pumps datagrams from a UDP association into the
 * socket's receive buffer.
 *
 * Waits until the buffer is empty, refills it via udp_uc_receive(),
 * notifies the client, and repeats until a receive error occurs.
 *
 * @param arg	Socket (udp_sockdata_t *)
 * @return	Always 0
 */
static int udp_sock_recv_fibril(void *arg)
{
	udp_sockdata_t *sock = (udp_sockdata_t *)arg;
	udp_error_t urc;
	xflags_t xflags;
	size_t rcvd;

	log_msg(LVL_DEBUG, "udp_sock_recv_fibril()");

	while (true) {
		log_msg(LVL_DEBUG, "[] wait for rcv buffer empty()");
		fibril_mutex_lock(&sock->recv_buffer_lock);
		/* Wait until the client has consumed the previous datagram. */
		while (sock->recv_buffer_used != 0) {
			fibril_condvar_wait(&sock->recv_buffer_cv,
			    &sock->recv_buffer_lock);
		}

		log_msg(LVL_DEBUG, "[] call udp_uc_receive()");
		urc = udp_uc_receive(sock->assoc, sock->recv_buffer,
		    UDP_FRAGMENT_SIZE, &rcvd, &xflags, &sock->recv_fsock);
		sock->recv_error = urc;

		udp_sock_notify_data(sock->sock_core);

		if (urc != UDP_EOK) {
			/* Wake readers so they observe recv_error, then
			 * terminate the fibril. */
			fibril_condvar_broadcast(&sock->recv_buffer_cv);
			fibril_mutex_unlock(&sock->recv_buffer_lock);
			break;
		}

		log_msg(LVL_DEBUG, "[] got data - broadcast recv_buffer_cv");

		sock->recv_buffer_used = rcvd;
		fibril_mutex_unlock(&sock->recv_buffer_lock);
		/* NOTE(review): broadcast occurs after unlock here, but
		 * before unlock in the error path above - verify the
		 * ordering difference is intentional. */
		fibril_condvar_broadcast(&sock->recv_buffer_cv);
	}

	udp_uc_destroy(sock->assoc);

	return 0;
}
コード例 #13
0
ファイル: sock.c プロジェクト: fhector/helenOS-0.5-Hector
/** Handle a socket recv/recvfrom request.
 *
 * Waits for data in the socket's receive buffer (filled by the
 * receiver fibril), optionally transfers the peer address to the
 * client (recvfrom only), transfers the data, and removes the
 * consumed portion from the buffer.
 *
 * @param client	TCP client
 * @param callid	IPC call id to answer
 * @param call		IPC call data
 */
static void tcp_sock_recvfrom(tcp_client_t *client, ipc_callid_t callid, ipc_call_t call)
{
	int socket_id;
	int flags;
	size_t addr_length, length;
	socket_core_t *sock_core;
	tcp_sockdata_t *socket;
	ipc_call_t answer;
	ipc_callid_t rcallid;
	size_t data_len;
	struct sockaddr_in addr;
	tcp_sock_t *rsock;
	int rc;

	log_msg(LVL_DEBUG, "%p: tcp_sock_recv[from]()", client);

	socket_id = SOCKET_GET_SOCKET_ID(call);
	flags = SOCKET_GET_FLAGS(call);

	sock_core = socket_cores_find(&client->sockets, socket_id);
	if (sock_core == NULL) {
		async_answer_0(callid, ENOTSOCK);
		return;
	}

	socket = (tcp_sockdata_t *)sock_core->specific_data;
	fibril_mutex_lock(&socket->lock);

	if (socket->conn == NULL) {
		/* Not connected. */
		fibril_mutex_unlock(&socket->lock);
		async_answer_0(callid, ENOTCONN);
		return;
	}

	(void)flags;

	log_msg(LVL_DEBUG, "tcp_sock_recvfrom(): lock recv_buffer_lock");
	fibril_mutex_lock(&socket->recv_buffer_lock);
	/* Block until the receiver fibril delivers data or an error. */
	while (socket->recv_buffer_used == 0 && socket->recv_error == TCP_EOK) {
		log_msg(LVL_DEBUG, "wait for recv_buffer_cv + recv_buffer_used != 0");
		fibril_condvar_wait(&socket->recv_buffer_cv,
		    &socket->recv_buffer_lock);
	}

	log_msg(LVL_DEBUG, "Got data in sock recv_buffer");

	data_len = socket->recv_buffer_used;
	/* NOTE(review): dead store - rc is unconditionally overwritten
	 * by the switch below. */
	rc = socket->recv_error;

	/* Map the TCP error code to a socket-layer errno. */
	switch (socket->recv_error) {
	case TCP_EOK:
		rc = EOK;
		break;
	case TCP_ENOTEXIST:
	case TCP_ECLOSING:
		rc = ENOTCONN;
		break;
	case TCP_ERESET:
		rc = ECONNABORTED;
		break;
	default:
		assert(false);
	}

	log_msg(LVL_DEBUG, "**** recv result -> %d", rc);
	if (rc != EOK) {
		fibril_mutex_unlock(&socket->recv_buffer_lock);
		fibril_mutex_unlock(&socket->lock);
		async_answer_0(callid, rc);
		return;
	}

	if (IPC_GET_IMETHOD(call) == NET_SOCKET_RECVFROM) {
		/* Fill addr with the foreign endpoint of the connection. */
		rsock = &socket->conn->ident.foreign;
		addr.sin_family = AF_INET;
		addr.sin_addr.s_addr = host2uint32_t_be(rsock->addr.ipv4);
		addr.sin_port = host2uint16_t_be(rsock->port);

		log_msg(LVL_DEBUG, "addr read receive");
		if (!async_data_read_receive(&rcallid, &addr_length)) {
			fibril_mutex_unlock(&socket->recv_buffer_lock);
			fibril_mutex_unlock(&socket->lock);
			async_answer_0(callid, EINVAL);
			return;
		}

		/* Truncate to the size we can actually provide. */
		if (addr_length > sizeof(addr))
			addr_length = sizeof(addr);

		log_msg(LVL_DEBUG, "addr read finalize");
		rc = async_data_read_finalize(rcallid, &addr, addr_length);
		if (rc != EOK) {
			fibril_mutex_unlock(&socket->recv_buffer_lock);
			fibril_mutex_unlock(&socket->lock);
			async_answer_0(callid, EINVAL);
			return;
		}
	}

	log_msg(LVL_DEBUG, "data read receive");
	if (!async_data_read_receive(&rcallid, &length)) {
		fibril_mutex_unlock(&socket->recv_buffer_lock);
		fibril_mutex_unlock(&socket->lock);
		async_answer_0(callid, EINVAL);
		return;
	}

	/* Transfer at most what we have buffered. */
	if (length > data_len)
		length = data_len;

	log_msg(LVL_DEBUG, "data read finalize");
	rc = async_data_read_finalize(rcallid, socket->recv_buffer, length);

	/* Remove the transferred prefix from the receive buffer. */
	socket->recv_buffer_used -= length;
	log_msg(LVL_DEBUG, "tcp_sock_recvfrom: %zu left in buffer",
	    socket->recv_buffer_used);
	if (socket->recv_buffer_used > 0) {
		memmove(socket->recv_buffer, socket->recv_buffer + length,
		    socket->recv_buffer_used);
		tcp_sock_notify_data(socket->sock_core);
	}

	/* Wake the receiver fibril waiting for buffer space. */
	fibril_condvar_broadcast(&socket->recv_buffer_cv);

	if (length < data_len && rc == EOK)
		rc = EOVERFLOW;

	SOCKET_SET_READ_DATA_LENGTH(answer, length);
	async_answer_1(callid, EOK, IPC_GET_ARG1(answer));

	fibril_mutex_unlock(&socket->recv_buffer_lock);
	fibril_mutex_unlock(&socket->lock);
}
コード例 #14
0
ファイル: sock.c プロジェクト: fhector/helenOS-0.5-Hector
/** Handle a socket recv/recvfrom request.
 *
 * Waits for a datagram in the socket's receive buffer (filled by the
 * receiver fibril), optionally transfers the sender's address to the
 * client (recvfrom only), transfers the data, and marks the buffer
 * consumed.
 *
 * @param client	UDP client
 * @param callid	IPC call id to answer
 * @param call		IPC call data
 */
static void udp_sock_recvfrom(udp_client_t *client, ipc_callid_t callid, ipc_call_t call)
{
	int socket_id;
	int flags;
	size_t addr_length, length;
	socket_core_t *sock_core;
	udp_sockdata_t *socket;
	ipc_call_t answer;
	ipc_callid_t rcallid;
	size_t data_len;
	udp_error_t urc;
	udp_sock_t rsock;
	struct sockaddr_in addr;
	int rc;

	log_msg(LVL_DEBUG, "%p: udp_sock_recv[from]()", client);

	socket_id = SOCKET_GET_SOCKET_ID(call);
	flags = SOCKET_GET_FLAGS(call);

	sock_core = socket_cores_find(&client->sockets, socket_id);
	if (sock_core == NULL) {
		async_answer_0(callid, ENOTSOCK);
		return;
	}

	socket = (udp_sockdata_t *)sock_core->specific_data;
	fibril_mutex_lock(&socket->lock);

	if (socket->assoc == NULL) {
		/* No association - not connected. */
		fibril_mutex_unlock(&socket->lock);
		async_answer_0(callid, ENOTCONN);
		return;
	}

	(void)flags;

	log_msg(LVL_DEBUG, "udp_sock_recvfrom(): lock recv_buffer lock");
	fibril_mutex_lock(&socket->recv_buffer_lock);
	/* Block until the receiver fibril delivers data or an error. */
	while (socket->recv_buffer_used == 0 && socket->recv_error == UDP_EOK) {
		log_msg(LVL_DEBUG, "udp_sock_recvfrom(): wait for cv");
		fibril_condvar_wait(&socket->recv_buffer_cv,
		    &socket->recv_buffer_lock);
	}

	log_msg(LVL_DEBUG, "Got data in sock recv_buffer");

	/* Snapshot the datagram's source and size under the lock. */
	rsock = socket->recv_fsock;
	data_len = socket->recv_buffer_used;
	urc = socket->recv_error;

	log_msg(LVL_DEBUG, "**** recv data_len=%zu", data_len);

	/* Map the UDP error code to a socket-layer errno.
	 * NOTE(review): if urc != UDP_EOK and assertions are compiled
	 * out (NDEBUG), control falls through with rc uninitialized -
	 * consider setting rc in the default branch. */
	switch (urc) {
	case UDP_EOK:
		rc = EOK;
		break;
/*	case TCP_ENOTEXIST:
	case TCP_ECLOSING:
		rc = ENOTCONN;
		break;
	case TCP_ERESET:
		rc = ECONNABORTED;
		break;*/
	default:
		assert(false);
	}

	log_msg(LVL_DEBUG, "**** udp_uc_receive -> %d", rc);
	if (rc != EOK) {
		fibril_mutex_unlock(&socket->recv_buffer_lock);
		fibril_mutex_unlock(&socket->lock);
		async_answer_0(callid, rc);
		return;
	}

	if (IPC_GET_IMETHOD(call) == NET_SOCKET_RECVFROM) {
		/* Fill addr with the datagram's source socket. */
		addr.sin_family = AF_INET;
		addr.sin_addr.s_addr = host2uint32_t_be(rsock.addr.ipv4);
		addr.sin_port = host2uint16_t_be(rsock.port);

		log_msg(LVL_DEBUG, "addr read receive");
		if (!async_data_read_receive(&rcallid, &addr_length)) {
			fibril_mutex_unlock(&socket->recv_buffer_lock);
			fibril_mutex_unlock(&socket->lock);
			async_answer_0(callid, EINVAL);
			return;
		}

		/* Truncate to the size we can actually provide. */
		if (addr_length > sizeof(addr))
			addr_length = sizeof(addr);

		log_msg(LVL_DEBUG, "addr read finalize");
		rc = async_data_read_finalize(rcallid, &addr, addr_length);
		if (rc != EOK) {
			fibril_mutex_unlock(&socket->recv_buffer_lock);
			fibril_mutex_unlock(&socket->lock);
			async_answer_0(callid, EINVAL);
			return;
		}
	}

	log_msg(LVL_DEBUG, "data read receive");
	if (!async_data_read_receive(&rcallid, &length)) {
		fibril_mutex_unlock(&socket->recv_buffer_lock);
		fibril_mutex_unlock(&socket->lock);
		async_answer_0(callid, EINVAL);
		return;
	}

	/* Transfer at most the size of the buffered datagram. */
	if (length > data_len)
		length = data_len;

	log_msg(LVL_DEBUG, "data read finalize");
	rc = async_data_read_finalize(rcallid, socket->recv_buffer, length);

	/* Datagram semantics: a short read loses the remainder. */
	if (length < data_len && rc == EOK)
		rc = EOVERFLOW;

	log_msg(LVL_DEBUG, "read_data_length <- %zu", length);
	IPC_SET_ARG2(answer, 0);
	SOCKET_SET_READ_DATA_LENGTH(answer, length);
	SOCKET_SET_ADDRESS_LENGTH(answer, sizeof(addr));
	async_answer_3(callid, EOK, IPC_GET_ARG1(answer),
	    IPC_GET_ARG2(answer), IPC_GET_ARG3(answer));

	/* Mark the buffer empty and wake the receiver fibril. */
	socket->recv_buffer_used = 0;

	fibril_condvar_broadcast(&socket->recv_buffer_cv);
	fibril_mutex_unlock(&socket->recv_buffer_lock);
	fibril_mutex_unlock(&socket->lock);
}