static void
vr_dpdk_packet_ring_drain(struct vr_usocket *usockp)
{
    int i;
    unsigned nb_pkts;
    struct rte_mbuf *mbuf_arr[VR_DPDK_RX_BURST_SZ];
    const unsigned lcore_id = rte_lcore_id();
    struct vr_interface_stats *stats;

    RTE_LOG(DEBUG, USOCK, "%s[%lx]: draining packet ring...\n", __func__,
            pthread_self());

    if (unlikely(usockp->usock_parent->usock_vif == NULL))
        return;

    rcu_thread_offline();

    stats = vif_get_stats(usockp->usock_parent->usock_vif, lcore_id);
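    /*
     * Dequeue the packet ring in bursts and write each mbuf to the packet
     * socket, updating the per-vif counters; every mbuf is freed afterwards.
     */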
    do {
        nb_pkts = rte_ring_sc_dequeue_burst(vr_dpdk.packet_ring,
            (void **)&mbuf_arr, VR_DPDK_RX_BURST_SZ);
        for (i = 0; i < nb_pkts; i++) {
            if (usock_mbuf_write(usockp->usock_parent, mbuf_arr[i]) >= 0)
                stats->vis_port_opackets++;
            else {
                stats->vis_port_oerrors++;
                RTE_LOG(DEBUG, USOCK,
                        "%s: Error writing mbuf to packet socket: %s (%d)\n",
                        __func__, rte_strerror(errno), errno);
            }

            rte_pktmbuf_free(mbuf_arr[i]);
        }
    } while (nb_pkts > 0);

    rcu_thread_online();
}
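/*
 * Packet transport I/O entry point: busy-wait for the packet transport
 * socket to appear, then service it with vr_usocket_io(); on failure, reset
 * the transport and retry unless an lcore IPC command asks us to stop.
 */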
int
dpdk_packet_io(void)
{
    int ret;
    struct vr_dpdk_lcore *lcore = vr_dpdk.lcores[rte_lcore_id()];

wait_for_connection:
    RTE_LOG(DEBUG, VROUTER, "%s[%lx]: waiting for packet transport\n",
                __func__, pthread_self());

    /* Set the thread offline while busy waiting for the
     * transport socket to appear.
     */
    rcu_thread_offline();
    while (!vr_dpdk.packet_transport) {
        /* handle an IPC command */
        if (unlikely(vr_dpdk_lcore_cmd_handle(lcore)))
            return -1;
        usleep(VR_DPDK_SLEEP_SERVICE_US);
    }
    rcu_thread_online();

    RTE_LOG(DEBUG, VROUTER, "%s[%lx]: FD %d\n", __func__, pthread_self(),
                ((struct vr_usocket *)vr_dpdk.packet_transport)->usock_fd);
    ret = vr_usocket_io(vr_dpdk.packet_transport);
    if (ret < 0) {
        vr_dpdk.packet_transport = NULL;
        /* handle an IPC command */
        if (unlikely(vr_dpdk_lcore_cmd_handle(lcore)))
            return -1;
        usleep(VR_DPDK_SLEEP_SERVICE_US);
        goto wait_for_connection;
    }

    return ret;
}
/*
 * Thread managing health check socket.
 */
void *thread_manage_health(void *data)
{
	int sock = -1, new_sock = -1, ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	struct health_comm_msg msg;
	struct health_comm_reply reply;
	int is_root;

	DBG("[thread] Manage health check started");

	setup_health_path();

	rcu_register_thread();

	/* We might hit an error path before this is created. */
	lttng_poll_init(&events);

	/* Create unix socket */
	sock = lttcomm_create_unix_sock(health_unix_sock_path);
	if (sock < 0) {
		ERR("Unable to create health check Unix socket");
		ret = -1;
		goto error;
	}

	is_root = !getuid();
	if (is_root) {
		/* lttng health client socket path permissions */
		ret = chown(health_unix_sock_path, 0,
				utils_get_group_id(tracing_group_name));
		if (ret < 0) {
			ERR("Unable to set group on %s", health_unix_sock_path);
			PERROR("chown");
			ret = -1;
			goto error;
		}

		ret = chmod(health_unix_sock_path,
				S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
		if (ret < 0) {
			ERR("Unable to set permissions on %s", health_unix_sock_path);
			PERROR("chmod");
			ret = -1;
			goto error;
		}
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	ret = lttcomm_listen_unix_sock(sock);
	if (ret < 0) {
		goto error;
	}

	/* Pollset of size 2: the thread quit pipe and the health client socket */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto error;
	}

	ret = lttng_poll_add(&events, health_quit_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto error;
	}

	/* Add the health client socket */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
	if (ret < 0) {
		goto error;
	}

	/* Perform prior memory accesses before decrementing ready */
	cmm_smp_mb__before_uatomic_dec();
	uatomic_dec(&lttng_consumer_ready);

	while (1) {
		DBG("Health check ready");

		/* Infinite blocking call, waiting for transmission */
restart:
		ret = lttng_poll_wait(&events, -1);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_health_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)
						&& !(revents & LPOLLIN)) {
					ERR("Health socket poll error");
					goto error;
				}
			}
		}

		new_sock = lttcomm_accept_unix_sock(sock);
		if (new_sock < 0) {
			goto error;
		}

		/*
		 * Set the CLOEXEC flag. Return code is useless because either way, the
		 * show must go on.
		 */
		(void) utils_set_fd_cloexec(new_sock);

		DBG("Receiving data from client for health...");
		ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
		if (ret <= 0) {
			DBG("Nothing recv() from client... continuing");
			ret = close(new_sock);
			if (ret) {
				PERROR("close");
			}
			new_sock = -1;
			continue;
		}

		rcu_thread_online();

		assert(msg.cmd == HEALTH_CMD_CHECK);

		memset(&reply, 0, sizeof(reply));
		for (i = 0; i < NR_HEALTH_CONSUMERD_TYPES; i++) {
			/*
			 * health_check_state() returns 0 if the thread is in
			 * error.
			 */
			if (!health_check_state(health_consumerd, i)) {
				reply.ret_code |= 1ULL << i;
			}
		}

		DBG("Health check return value %" PRIx64, reply.ret_code);

		ret = lttcomm_send_unix_sock(new_sock, (void *) &reply, sizeof(reply));
		if (ret < 0) {
			ERR("Failed to send health data back to client");
		}

		/* End of transmission */
		ret = close(new_sock);
		if (ret) {
			PERROR("close");
		}
		new_sock = -1;
	}

exit:
error:
	if (err) {
		ERR("Health error occurred in %s", __func__);
	}
	DBG("Health check thread dying");
	unlink(health_unix_sock_path);
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	lttng_poll_clean(&events);

	rcu_unregister_thread();
	return NULL;
}
/*
 * Start I/O on a socket: poll the socket and its child sockets, dispatching
 * POLLIN/POLLOUT/POLLHUP events until the socket is closed or a stop/IPC
 * command is seen.  In non-blocking mode a single poll pass is made and 0 is
 * returned.
 */
int
vr_usocket_io(void *transport)
{
    int ret, i, processed;
    int timeout;
    struct pollfd *pfd;
    struct vr_usocket *usockp = (struct vr_usocket *)transport;
    unsigned lcore_id = rte_lcore_id();
    struct vr_dpdk_lcore *lcore = vr_dpdk.lcores[lcore_id];

    if (!usockp)
        return -1;

    RTE_LOG(DEBUG, USOCK, "%s[%lx]: FD %d\n", __func__, pthread_self(),
                usockp->usock_fd);
    if ((ret = usock_init_poll(usockp)))
        goto return_from_io;

    pfd = &usockp->usock_pfds[0];
    pfd->fd = usockp->usock_fd;
    pfd->events = POLLIN;

    usockp->usock_io_in_progress = 1;

    timeout = usockp->usock_poll_block ? INFINITE_TIMEOUT : 0;
    while (1) {
        if (usockp->usock_should_close) {
            usock_close(usockp);
            return -1;
        }

        /*
         * Handle IPC commands for lcores at or above VR_DPDK_IO_LCORE_ID;
         * for the rest, just check the stop flag.
         */
        if (lcore_id >= VR_DPDK_IO_LCORE_ID) {
            if (unlikely(vr_dpdk_lcore_cmd_handle(lcore)))
                break;
        } else {
            if (unlikely(vr_dpdk_is_stop_flag_set()))
                break;
        }

        rcu_thread_offline();
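        /*
         * The poll() below may block indefinitely; stay RCU-offline so this
         * thread does not delay grace periods while it waits.
         */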
        ret = poll(usockp->usock_pfds, usockp->usock_max_cfds,
                timeout);
        if (ret < 0) {
            usock_set_error(usockp, ret);
            /* all errors other than EINTR are fatal; go back online before
             * bailing out so the offline/online calls stay balanced */
            if (errno != EINTR) {
                rcu_thread_online();
                goto return_from_io;
            }
        }

        rcu_thread_online();

        processed = 0;
        pfd = usockp->usock_pfds;
        for (i = 0; (i < usockp->usock_max_cfds) && (processed < ret);
                i++, pfd++) {
            if (pfd->fd >= 0) {
                if (pfd->revents & POLLIN) {
                    if (i == 0) {
                        ret = vr_usocket_read(usockp);
                        if (ret < 0)
                            return ret;
                    } else {
                        vr_usocket_read(usockp->usock_children[i]);
                    }
                }

                if (pfd->revents & POLLOUT) {
                    usock_write(usockp->usock_children[i]);
                }

                if (pfd->revents & POLLHUP) {
                    if (i) {
                        usock_close(usockp->usock_children[i]);
                    } else {
                        break;
                    }
                }

                if (pfd->revents)
                    processed++;
            }
        }

        if (!timeout)
            return 0;
    }

return_from_io:
    usockp->usock_io_in_progress = 0;
    usock_deinit_poll(usockp);

    return ret;
}
/*
 * Thread managing health check socket.
 */
static void *thread_manage_health(void *data)
{
	const bool is_root = (getuid() == 0);
	int sock = -1, new_sock = -1, ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	struct health_comm_msg msg;
	struct health_comm_reply reply;
	/* Thread-specific quit pipe. */
	struct thread_notifiers *notifiers = data;
	const int quit_pipe_read_fd = lttng_pipe_get_readfd(
			notifiers->quit_pipe);

	DBG("[thread] Manage health check started");

	rcu_register_thread();

	/*
	 * Created with a size of two for:
	 *   - client socket
	 *   - thread quit pipe
	 */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error;
	}

	/* Create unix socket */
	sock = lttcomm_create_unix_sock(config.health_unix_sock_path.value);
	if (sock < 0) {
		ERR("Unable to create health check Unix socket");
		goto error;
	}

	if (is_root) {
		/* lttng health client socket path permissions */
		gid_t gid;

		ret = utils_get_group_id(config.tracing_group_name.value, true, &gid);
		if (ret) {
			/* Default to root group. */
			gid = 0;
		}

		ret = chown(config.health_unix_sock_path.value, 0, gid);
		if (ret < 0) {
			ERR("Unable to set group on %s", config.health_unix_sock_path.value);
			PERROR("chown");
			goto error;
		}

		ret = chmod(config.health_unix_sock_path.value,
				S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
		if (ret < 0) {
			ERR("Unable to set permissions on %s", config.health_unix_sock_path.value);
			PERROR("chmod");
			goto error;
		}
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	ret = lttcomm_listen_unix_sock(sock);
	if (ret < 0) {
		goto error;
	}

	ret = lttng_poll_add(&events, quit_pipe_read_fd, LPOLLIN | LPOLLERR);
	if (ret < 0) {
		goto error;
	}

	/* Add the health client socket */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
	if (ret < 0) {
		goto error;
	}

	mark_thread_as_ready(notifiers);
	while (1) {
		DBG("Health check ready");

		/* Infinite blocking call, waiting for transmission */
restart:
		ret = lttng_poll_wait(&events, -1);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Event on the registration socket */
			if (pollfd == sock) {
				if (revents & LPOLLIN) {
					continue;
				} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Health socket poll error");
					goto error;
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto error;
				}
			} else {
				/* Event on the thread's quit pipe. */
				err = 0;
				goto exit;
			}
		}

		new_sock = lttcomm_accept_unix_sock(sock);
		if (new_sock < 0) {
			goto error;
		}

		/*
		 * Set the CLOEXEC flag. Return code is useless because either way, the
		 * show must go on.
		 */
		(void) utils_set_fd_cloexec(new_sock);

		DBG("Receiving data from client for health...");
		ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
		if (ret <= 0) {
			DBG("Nothing recv() from client... continuing");
			ret = close(new_sock);
			if (ret) {
				PERROR("close");
			}
			continue;
		}

		rcu_thread_online();

		memset(&reply, 0, sizeof(reply));
		for (i = 0; i < NR_HEALTH_SESSIOND_TYPES; i++) {
			/*
			 * health_check_state returns 0 if health is
			 * bad.
			 */
			if (!health_check_state(health_sessiond, i)) {
				reply.ret_code |= 1ULL << i;
			}
		}

		DBG2("Health check return value %" PRIx64, reply.ret_code);

		ret = lttcomm_send_unix_sock(new_sock, (void *) &reply,
				sizeof(reply));
		if (ret < 0) {
			ERR("Failed to send health data back to client");
		}

		/* End of transmission */
		ret = close(new_sock);
		if (ret) {
			PERROR("close");
		}
	}

exit:
error:
	if (err) {
		ERR("Health error occurred in %s", __func__);
	}
	DBG("Health check thread dying");
	unlink(config.health_unix_sock_path.value);
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	lttng_poll_clean(&events);
	rcu_unregister_thread();
	return NULL;
}
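/*
 * Thread destroying hash tables handed over on ht_cleanup_pipe, from a
 * context that holds no RCU read-side lock and is not a call_rcu worker.
 */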
void *thread_ht_cleanup(void *data)
{
	int ret, i, pollfd, err = -1;
	ssize_t size_ret;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;

	DBG("[ht-thread] startup.");

	rcu_register_thread();
	rcu_thread_online();

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_HT_CLEANUP);

	health_code_update();

	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error_poll_create;
	}

	/* Add pipe to the pollset. */
	ret = lttng_poll_add(&events, ht_cleanup_pipe[0], LPOLLIN | LPOLLERR);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	while (1) {
		DBG3("[ht-thread] Polling on %d fds.",
			LTTNG_POLL_GETNB(&events));

		/* Infinite blocking call, waiting for transmission */
restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			struct lttng_ht *ht;

			health_code_update();

			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}
			assert(pollfd == ht_cleanup_pipe[0]);

			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("ht cleanup pipe error");
				goto error;
			} else if (!(revents & LPOLLIN)) {
				/* No POLLIN and not a caught error; stop the thread. */
				ERR("ht cleanup failed. revent: %u", revents);
				goto error;
			}

			/* Get the hash table pointer from the notifying thread. */
			size_ret = lttng_read(ht_cleanup_pipe[0], &ht,
					sizeof(ht));
			if (size_ret < (ssize_t) sizeof(ht)) {
				PERROR("ht cleanup notify pipe");
				goto error;
			}
			health_code_update();
			/*
			 * The whole point of this thread is to call
			 * lttng_ht_destroy from a context that is NOT:
			 * 1) a read-side RCU lock,
			 * 2) a call_rcu thread.
			 */
			lttng_ht_destroy(ht);

			health_code_update();
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
	utils_close_pipe(ht_cleanup_pipe);
	ht_cleanup_pipe[0] = ht_cleanup_pipe[1] = -1;
	DBG("[ust-thread] cleanup complete.");
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	rcu_thread_offline();
	rcu_unregister_thread();
	return NULL;
}
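/*
 * For illustration only: a minimal sketch of the producer side of the
 * ht_cleanup_pipe protocol serviced by the thread above.  The helper name is
 * hypothetical; it simply writes the table pointer into the pipe so that the
 * cleanup thread can read it back and call lttng_ht_destroy().
 */
static void ht_cleanup_push_sketch(struct lttng_ht *ht)
{
	ssize_t size_ret;

	/* Hand the pointer itself over the pipe; ownership moves to the
	 * cleanup thread. */
	size_ret = lttng_write(ht_cleanup_pipe[1], &ht, sizeof(ht));
	if (size_ret < (ssize_t) sizeof(ht)) {
		PERROR("write ht cleanup pipe");
	}
}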
/*
 * This thread manages application notify communication.
 */
void *ust_thread_manage_notify(void *data)
{
	int i, ret, pollfd, err = -1;
	ssize_t size_ret;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;

	DBG("[ust-thread] Manage application notify command");

	rcu_register_thread();
	rcu_thread_online();

	health_register(health_sessiond,
		HEALTH_SESSIOND_TYPE_APP_MANAGE_NOTIFY);

	if (testpoint(sessiond_thread_app_manage_notify)) {
		goto error_testpoint;
	}

	health_code_update();

	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error_poll_create;
	}

	/* Add notify pipe to the pollset. */
	ret = lttng_poll_add(&events, apps_cmd_notify_pipe[0], LPOLLIN | LPOLLERR);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	while (1) {
		DBG3("[ust-thread] Manage notify polling on %d fds",
				LTTNG_POLL_GETNB(&events));

		/* Infinite blocking call, waiting for transmission */
restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Inspect the apps cmd pipe */
			if (pollfd == apps_cmd_notify_pipe[0]) {
				int sock;

				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Apps notify command pipe error");
					goto error;
				} else if (!(revents & LPOLLIN)) {
					/* No POLLIN and not a caught error; stop the thread. */
					ERR("Notify command pipe failed. revent: %u", revents);
					goto error;
				}

				/* Get socket from dispatch thread. */
				size_ret = lttng_read(apps_cmd_notify_pipe[0],
						&sock, sizeof(sock));
				if (size_ret < (ssize_t) sizeof(sock)) {
					PERROR("read apps notify pipe");
					goto error;
				}
				health_code_update();

				ret = lttng_poll_add(&events, sock,
						LPOLLIN | LPOLLERR | LPOLLHUP | LPOLLRDHUP);
				if (ret < 0) {
					/*
					 * It's possible we've reached the max poll fd allowed.
					 * Let's close the socket but continue normal execution.
					 */
					ret = close(sock);
					if (ret) {
						PERROR("close notify socket %d", sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
					continue;
				}
				DBG3("UST thread notify added sock %d to pollset", sock);
			} else {
				/*
				 * At this point, we know that a registered application
				 * triggered the event.
				 */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					/* Removing from the poll set */
					ret = lttng_poll_del(&events, pollfd);
					if (ret < 0) {
						goto error;
					}

					/* The socket is closed after a grace period here. */
					ust_app_notify_sock_unregister(pollfd);
				} else if (revents & (LPOLLIN | LPOLLPRI)) {
					ret = ust_app_recv_notify(pollfd);
					if (ret < 0) {
						/*
						 * If the notification failed either the application is
						 * dead or an internal error happened. In both cases,
						 * we can only continue here. If the application is
						 * dead, an unregistration will follow or else the
						 * application will notice that we are not responding
						 * on that socket and will close it.
						 */
						continue;
					}
				} else {
					ERR("Unknown poll events %u for sock %d", revents, pollfd);
					continue;
				}
				health_code_update();
			}
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
error_testpoint:
	utils_close_pipe(apps_cmd_notify_pipe);
	apps_cmd_notify_pipe[0] = apps_cmd_notify_pipe[1] = -1;
	DBG("Application notify communication apps thread cleanup complete");
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	rcu_thread_offline();
	rcu_unregister_thread();
	return NULL;
}