/*
 * Populates the reception buffer with the next complete message.
 * The caller must hold the client's lock.
 */
static int receive_message(struct lttng_notification_channel *channel)
{
	ssize_t ret;
	struct lttng_notification_channel_message msg;

	ret = lttng_dynamic_buffer_set_size(&channel->reception_buffer, 0);
	if (ret) {
		goto error;
	}

	ret = lttcomm_recv_unix_sock(channel->socket, &msg, sizeof(msg));
	if (ret <= 0) {
		ret = -1;
		goto error;
	}

	if (msg.size > DEFAULT_MAX_NOTIFICATION_CLIENT_MESSAGE_PAYLOAD_SIZE) {
		ret = -1;
		goto error;
	}

	/* Add message header at buffer's start. */
	ret = lttng_dynamic_buffer_append(&channel->reception_buffer, &msg,
			sizeof(msg));
	if (ret) {
		goto error;
	}

	/* Reserve space for the payload. */
	ret = lttng_dynamic_buffer_set_size(&channel->reception_buffer,
			channel->reception_buffer.size + msg.size);
	if (ret) {
		goto error;
	}

	/* Receive message payload. */
	ret = lttcomm_recv_unix_sock(channel->socket,
			channel->reception_buffer.data + sizeof(msg), msg.size);
	if (ret < (ssize_t) msg.size) {
		ret = -1;
		goto error;
	}

	ret = 0;
end:
	return ret;
error:
	if (lttng_dynamic_buffer_set_size(&channel->reception_buffer, 0)) {
		ret = -1;
	}
	goto end;
}
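/*
 * For illustration only: a minimal sketch of a caller driving
 * receive_message() under the client's lock. The lock member name
 * (channel->lock) and the parse_notification() helper are assumptions
 * made for this sketch; they are not part of the code above.
 */
static int poll_one_notification(struct lttng_notification_channel *channel)
{
	int ret;

	pthread_mutex_lock(&channel->lock);	/* assumed lock member */
	ret = receive_message(channel);
	if (!ret) {
		/* reception_buffer now holds the header followed by the payload. */
		ret = parse_notification(channel);	/* hypothetical helper */
	}
	pthread_mutex_unlock(&channel->lock);
	return ret;
}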
static int run_as_cmd(struct run_as_worker *worker,
		enum run_as_cmd cmd,
		struct run_as_data *data, uid_t uid, gid_t gid)
{
	ssize_t readlen, writelen;
	struct run_as_ret recvret;

	/*
	 * If we are non-root, we can only deal with our own uid.
	 */
	if (geteuid() != 0) {
		if (uid != geteuid()) {
			recvret.ret = -1;
			recvret._errno = EPERM;
			ERR("Client (%d)/Server (%d) UID mismatch (and sessiond is not root)",
					(int) uid, (int) geteuid());
			goto end;
		}
	}

	data->cmd = cmd;
	data->uid = uid;
	data->gid = gid;

	writelen = lttcomm_send_unix_sock(worker->sockpair[0], data,
			sizeof(*data));
	if (writelen < sizeof(*data)) {
		PERROR("Error writing message to run_as");
		recvret.ret = -1;
		recvret._errno = errno;
		goto end;
	}

	/* receive return value */
	readlen = lttcomm_recv_unix_sock(worker->sockpair[0], &recvret,
			sizeof(recvret));
	if (!readlen) {
		ERR("Run-as worker has hung-up during run_as_cmd");
		recvret.ret = -1;
		recvret._errno = EIO;
		goto end;
	} else if (readlen < sizeof(recvret)) {
		PERROR("Error reading response from run_as");
		recvret.ret = -1;
		recvret._errno = errno;
	}

	if (do_recv_fd(worker, cmd, &recvret.ret)) {
		recvret.ret = -1;
		recvret._errno = EIO;
	}

end:
	errno = recvret._errno;
	return recvret.ret;
}
/*
 * Receive data from the sessiond socket.
 *
 * On success, returns the number of bytes received (>= 0).
 * On error, returns -LTTNG_ERR_NO_SESSIOND if there is no session daemon
 * connection, or -LTTNG_ERR_FATAL if the reception itself fails.
 */
static int recv_data_sessiond(void *buf, size_t len)
{
	int ret;

	if (!connected) {
		ret = -LTTNG_ERR_NO_SESSIOND;
		goto end;
	}

	ret = lttcomm_recv_unix_sock(sessiond_socket, buf, len);
	if (ret < 0) {
		ret = -LTTNG_ERR_FATAL;
	}

end:
	return ret;
}
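/*
 * Usage sketch (hypothetical caller in the same file, since
 * recv_data_sessiond() is static): receive a fixed-size reply and
 * translate a negative return value with lttng_strerror(). The
 * struct lttcomm_lttng_msg reply type is an assumption of this sketch.
 */
static int recv_one_reply(void)
{
	struct lttcomm_lttng_msg llm;
	int ret;

	ret = recv_data_sessiond(&llm, sizeof(llm));
	if (ret < 0) {
		/* ret is a negative lttng error code. */
		ERR("Reception failed: %s", lttng_strerror(ret));
	}
	return ret;
}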
/*
 * Check session daemon health for a specific health component.
 *
 * Returns 0 if the component's health is OK, or 1 if it is bad.
 *
 * A negative return value is an lttng error code, which can be translated
 * with lttng_strerror().
 */
int lttng_health_check(enum lttng_health_component c)
{
	int sock, ret;
	struct lttcomm_health_msg msg;
	struct lttcomm_health_data reply;

	/* Connect to the session daemon. */
	sock = lttcomm_connect_unix_sock(health_sock_path);
	if (sock < 0) {
		ret = -LTTNG_ERR_NO_SESSIOND;
		goto error;
	}

	msg.cmd = LTTNG_HEALTH_CHECK;
	msg.component = c;

	ret = lttcomm_send_unix_sock(sock, (void *) &msg, sizeof(msg));
	if (ret < 0) {
		ret = -LTTNG_ERR_FATAL;
		goto close_error;
	}

	ret = lttcomm_recv_unix_sock(sock, (void *) &reply, sizeof(reply));
	if (ret < 0) {
		ret = -LTTNG_ERR_FATAL;
		goto close_error;
	}

	ret = reply.ret_code;

close_error:
	close(sock);

error:
	return ret;
}
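/*
 * Since lttng_health_check() is a public entry point, a short usage
 * sketch: LTTNG_HEALTH_CMD is assumed to be one of the
 * enum lttng_health_component values exposed by this API.
 */
#include <stdio.h>
#include <lttng/lttng.h>

int main(void)
{
	int status = lttng_health_check(LTTNG_HEALTH_CMD);

	if (status == 0) {
		printf("Component is healthy\n");
	} else if (status == 1) {
		printf("Component reported bad health\n");
	} else {
		/* Negative values are lttng error codes. */
		fprintf(stderr, "Health check failed: %s\n",
				lttng_strerror(status));
	}
	return status;
}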
/*
 * Thread managing the health check socket.
 */
void *thread_manage_health(void *data)
{
	int sock = -1, new_sock = -1, ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	struct health_comm_msg msg;
	struct health_comm_reply reply;
	int is_root;

	DBG("[thread] Manage health check started");

	setup_health_path();

	rcu_register_thread();

	/* We might hit an error path before this is created. */
	lttng_poll_init(&events);

	/* Create unix socket */
	sock = lttcomm_create_unix_sock(health_unix_sock_path);
	if (sock < 0) {
		ERR("Unable to create health check Unix socket");
		ret = -1;
		goto error;
	}

	is_root = !getuid();
	if (is_root) {
		/* lttng health client socket path permissions */
		ret = chown(health_unix_sock_path, 0,
				utils_get_group_id(tracing_group_name));
		if (ret < 0) {
			ERR("Unable to set group on %s", health_unix_sock_path);
			PERROR("chown");
			ret = -1;
			goto error;
		}

		ret = chmod(health_unix_sock_path,
				S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
		if (ret < 0) {
			ERR("Unable to set permissions on %s", health_unix_sock_path);
			PERROR("chmod");
			ret = -1;
			goto error;
		}
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	ret = lttcomm_listen_unix_sock(sock);
	if (ret < 0) {
		goto error;
	}

	/* Size is set to 2 for the thread quit pipe and the client socket. */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto error;
	}

	ret = lttng_poll_add(&events, health_quit_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto error;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
	if (ret < 0) {
		goto error;
	}

	/* Perform prior memory accesses before decrementing ready */
	cmm_smp_mb__before_uatomic_dec();
	uatomic_dec(&lttng_consumer_ready);

	while (1) {
		DBG("Health check ready");

		/* Infinite blocking call, waiting for transmission */
restart:
		ret = lttng_poll_wait(&events, -1);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_health_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)
						&& !(revents & LPOLLIN)) {
					ERR("Health socket poll error");
					goto error;
				}
			}
		}

		new_sock = lttcomm_accept_unix_sock(sock);
		if (new_sock < 0) {
			goto error;
		}

		/*
		 * Set the CLOEXEC flag. Return code is useless because either way, the
		 * show must go on.
		 */
		(void) utils_set_fd_cloexec(new_sock);

		DBG("Receiving data from client for health...");
		ret = lttcomm_recv_unix_sock(new_sock, (void *) &msg, sizeof(msg));
		if (ret <= 0) {
			DBG("Nothing recv() from client... continuing");
			ret = close(new_sock);
			if (ret) {
				PERROR("close");
			}
			new_sock = -1;
			continue;
		}

		rcu_thread_online();

		assert(msg.cmd == HEALTH_CMD_CHECK);

		memset(&reply, 0, sizeof(reply));
		for (i = 0; i < NR_HEALTH_CONSUMERD_TYPES; i++) {
			/*
			 * health_check_state returns 0 if the thread is in
			 * error.
			 */
			if (!health_check_state(health_consumerd, i)) {
				reply.ret_code |= 1ULL << i;
			}
		}

		DBG("Health check return value %" PRIx64, reply.ret_code);

		ret = lttcomm_send_unix_sock(new_sock, (void *) &reply,
				sizeof(reply));
		if (ret < 0) {
			ERR("Failed to send health data back to client");
		}

		/* End of transmission */
		ret = close(new_sock);
		if (ret) {
			PERROR("close");
		}
		new_sock = -1;
	}

exit:
error:
	if (err) {
		ERR("Health error occurred in %s", __func__);
	}
	DBG("Health check thread dying");
	unlink(health_unix_sock_path);
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	lttng_poll_clean(&events);

	rcu_unregister_thread();
	return NULL;
}
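/*
 * The reply encodes one bit per consumerd thread type: bit i is set
 * when health_check_state() reported thread type i in error. A
 * client-side decode could look like the following sketch; the
 * NR_HEALTH_CONSUMERD_TYPES bound is reused from the server code
 * above and assumed visible to the client.
 */
static void report_bad_threads(const struct health_comm_reply *reply)
{
	int i;

	for (i = 0; i < NR_HEALTH_CONSUMERD_TYPES; i++) {
		if (reply->ret_code & (1ULL << i)) {
			ERR("Consumerd thread type %d is in error", i);
		}
	}
}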
static int run_as_create_worker_no_lock(const char *procname,
		post_fork_cleanup_cb clean_up_func,
		void *clean_up_user_data)
{
	pid_t pid;
	int i, ret = 0;
	ssize_t readlen;
	struct run_as_ret recvret;
	struct run_as_worker *worker;

	assert(!global_worker);
	if (!use_clone()) {
		/*
		 * Don't initialize a worker, all run_as tasks will be performed
		 * in the current process.
		 */
		ret = 0;
		goto end;
	}

	worker = zmalloc(sizeof(*worker));
	if (!worker) {
		ret = -ENOMEM;
		goto end;
	}

	worker->procname = strdup(procname);
	if (!worker->procname) {
		ret = -ENOMEM;
		goto error_procname_alloc;
	}

	/* Create unix socket. */
	if (lttcomm_create_anon_unix_socketpair(worker->sockpair) < 0) {
		ret = -1;
		goto error_sock;
	}

	/* Fork worker. */
	pid = fork();
	if (pid < 0) {
		PERROR("fork");
		ret = -1;
		goto error_fork;
	} else if (pid == 0) {
		/* Child */
		reset_sighandler();

		set_worker_sighandlers();

		if (clean_up_func) {
			if (clean_up_func(clean_up_user_data) < 0) {
				ERR("Run-as post-fork clean-up failed, exiting.");
				exit(EXIT_FAILURE);
			}
		}

		/* Just close, no shutdown. */
		if (close(worker->sockpair[0])) {
			PERROR("close");
			exit(EXIT_FAILURE);
		}

		/*
		 * Close all FDs aside from STDIN, STDOUT, STDERR and sockpair[1].
		 * sockpair[1] is used as a control channel with the master.
		 */
		for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) {
			if (i != worker->sockpair[1]) {
				(void) close(i);
			}
		}

		worker->sockpair[0] = -1;
		ret = run_as_worker(worker);
		if (lttcomm_close_unix_sock(worker->sockpair[1])) {
			PERROR("close");
			ret = -1;
		}
		worker->sockpair[1] = -1;
		free(worker->procname);
		free(worker);
		LOG(ret ? PRINT_ERR : PRINT_DBG, "run_as worker exiting (ret = %d)", ret);
		exit(ret ? EXIT_FAILURE : EXIT_SUCCESS);
	} else {
		/* Parent */

		/* Just close, no shutdown. */
		if (close(worker->sockpair[1])) {
			PERROR("close");
			ret = -1;
			goto error_fork;
		}
		worker->sockpair[1] = -1;
		worker->pid = pid;

		/* Wait for worker to become ready. */
		readlen = lttcomm_recv_unix_sock(worker->sockpair[0],
				&recvret, sizeof(recvret));
		if (readlen < sizeof(recvret)) {
			ERR("readlen: %zd", readlen);
			PERROR("Error reading response from run_as at creation");
			ret = -1;
			goto error_fork;
		}
		global_worker = worker;
	}
end:
	return ret;

	/* Error handling. */
error_fork:
	for (i = 0; i < 2; i++) {
		if (worker->sockpair[i] < 0) {
			continue;
		}
		if (lttcomm_close_unix_sock(worker->sockpair[i])) {
			PERROR("close");
		}
		worker->sockpair[i] = -1;
	}
error_sock:
	free(worker->procname);
error_procname_alloc:
	free(worker);
	return ret;
}
static int run_as_cmd(struct run_as_worker *worker,
		enum run_as_cmd cmd,
		struct run_as_data *data,
		struct run_as_ret *ret_value,
		uid_t uid, gid_t gid)
{
	int ret = 0;
	ssize_t readlen, writelen;

	/*
	 * If we are non-root, we can only deal with our own uid.
	 */
	if (geteuid() != 0) {
		if (uid != geteuid()) {
			ret = -1;
			ret_value->_errno = EPERM;
			ERR("Client (%d)/Server (%d) UID mismatch (and sessiond is not root)",
					(int) uid, (int) geteuid());
			goto end;
		}
	}

	data->cmd = cmd;
	data->uid = uid;
	data->gid = gid;

	/*
	 * Stage 1: Send the run_as_data struct to the worker process.
	 */
	writelen = lttcomm_send_unix_sock(worker->sockpair[0], data,
			sizeof(*data));
	if (writelen < sizeof(*data)) {
		PERROR("Error writing message to run_as");
		ret = -1;
		ret_value->_errno = EIO;
		goto end;
	}

	/*
	 * Stage 2: Send a file descriptor to the worker process if needed.
	 */
	ret = send_fd_to_worker(worker, data->cmd, data->fd);
	if (ret) {
		PERROR("do_send_fd error");
		ret = -1;
		ret_value->_errno = EIO;
		goto end;
	}

	/*
	 * Stage 3: Wait for the execution of the command.
	 */

	/*
	 * Stage 4: Receive the run_as_ret struct containing the return value
	 * and errno.
	 */
	readlen = lttcomm_recv_unix_sock(worker->sockpair[0], ret_value,
			sizeof(*ret_value));
	if (!readlen) {
		ERR("Run-as worker has hung-up during run_as_cmd");
		ret = -1;
		ret_value->_errno = EIO;
		goto end;
	} else if (readlen < sizeof(*ret_value)) {
		PERROR("Error reading response from run_as");
		ret = -1;
		ret_value->_errno = errno;
		goto end;
	}

	if (ret_value->_error) {
		/* Skip stage 5 on error as there will be no fd to receive. */
		goto end;
	}

	/*
	 * Stage 5: Receive a file descriptor if needed.
	 */
	ret = recv_fd_from_worker(worker, data->cmd, &ret_value->fd);
	if (ret < 0) {
		ERR("Error receiving fd");
		ret = -1;
		ret_value->_errno = EIO;
	}

end:
	return ret;
}
/*
 * Return < 0 on error, 0 if OK, 1 on hangup.
 */
static int handle_one_cmd(struct run_as_worker *worker)
{
	int ret = 0;
	struct run_as_data data;
	ssize_t readlen, writelen;
	struct run_as_ret sendret;
	run_as_fct cmd;
	uid_t prev_euid;

	memset(&sendret, 0, sizeof(sendret));
	sendret.fd = -1;

	/*
	 * Stage 1: Receive the run_as_data struct from the master.
	 * The structure contains the command type and all the parameters
	 * needed for its execution.
	 */
	readlen = lttcomm_recv_unix_sock(worker->sockpair[1], &data,
			sizeof(data));
	if (readlen == 0) {
		/* hang up */
		ret = 1;
		goto end;
	}
	if (readlen < sizeof(data)) {
		PERROR("lttcomm_recv_unix_sock error");
		ret = -1;
		goto end;
	}

	cmd = run_as_enum_to_fct(data.cmd);
	if (!cmd) {
		ret = -1;
		goto end;
	}

	/*
	 * Stage 2: Receive a file descriptor from the master.
	 * Some commands need a file descriptor as input, in which case it is
	 * received over the Unix socket.
	 */
	ret = recv_fd_from_master(worker, data.cmd, &data.fd);
	if (ret < 0) {
		PERROR("recv_fd_from_master error");
		ret = -1;
		goto end;
	}

	prev_euid = getuid();
	if (data.gid != getegid()) {
		ret = setegid(data.gid);
		if (ret < 0) {
			sendret._error = true;
			sendret._errno = errno;
			PERROR("setegid");
			goto write_return;
		}
	}
	if (data.uid != prev_euid) {
		ret = seteuid(data.uid);
		if (ret < 0) {
			sendret._error = true;
			sendret._errno = errno;
			PERROR("seteuid");
			goto write_return;
		}
	}

	/*
	 * Also set umask to 0 for mkdir executable bit.
	 */
	umask(0);

	/*
	 * Stage 3: Execute the command.
	 */
	ret = (*cmd)(&data, &sendret);
	if (ret < 0) {
		DBG("Execution of command returned an error");
	}

write_return:
	ret = cleanup_received_fd(data.cmd, data.fd);
	if (ret < 0) {
		ERR("Error cleaning up FD");
		goto end;
	}

	/*
	 * Stage 4: Send the run_as_ret structure to the master.
	 * This structure contains the return value of the command and errno.
	 */
	writelen = lttcomm_send_unix_sock(worker->sockpair[1], &sendret,
			sizeof(sendret));
	if (writelen < sizeof(sendret)) {
		PERROR("lttcomm_send_unix_sock error");
		ret = -1;
		goto end;
	}

	/*
	 * Stage 5: Send a file descriptor to the master.
	 * Some commands return a file descriptor, in which case it is passed
	 * back to the master over the Unix socket.
	 */
	ret = send_fd_to_master(worker, data.cmd, sendret.fd);
	if (ret < 0) {
		DBG("Sending FD to master returned an error");
		goto end;
	}

	if (seteuid(prev_euid) < 0) {
		PERROR("seteuid");
		ret = -1;
		goto end;
	}

	ret = 0;
end:
	return ret;
}
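/*
 * Reading run_as_cmd() and handle_one_cmd() together, the exchange over
 * the socketpair is a fixed five-stage sequence per command:
 *
 *   master (sockpair[0])                  worker (sockpair[1])
 *   --------------------                  --------------------
 *   1. send struct run_as_data   ------>  receive struct run_as_data
 *   2. send input fd (if any)    ------>  receive input fd
 *   3. wait                               setegid()/seteuid(), run command
 *   4. receive struct run_as_ret <------  send struct run_as_ret
 *   5. receive output fd         <------  send output fd
 *      (skipped when _error is set)       (only for commands that return one)
 */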
LTTNG_HIDDEN
int run_as_create_worker(char *procname)
{
	pid_t pid;
	int i, ret = 0;
	ssize_t readlen;
	struct run_as_ret recvret;
	struct run_as_worker *worker;

	pthread_mutex_lock(&worker_lock);
	assert(!global_worker);
	if (!use_clone()) {
		/*
		 * Don't initialize a worker, all run_as tasks will be performed
		 * in the current process.
		 */
		ret = 0;
		goto end;
	}

	worker = zmalloc(sizeof(*worker));
	if (!worker) {
		ret = -ENOMEM;
		goto end;
	}
	worker->procname = procname;

	/* Create unix socket. */
	if (lttcomm_create_anon_unix_socketpair(worker->sockpair) < 0) {
		ret = -1;
		goto error_sock;
	}

	/* Fork worker. */
	pid = fork();
	if (pid < 0) {
		PERROR("fork");
		ret = -1;
		goto error_fork;
	} else if (pid == 0) {
		/* Child */
		reset_sighandler();

		set_worker_sighandlers();

		/* The child has no use for this lock. */
		pthread_mutex_unlock(&worker_lock);

		/* Just close, no shutdown. */
		if (close(worker->sockpair[0])) {
			PERROR("close");
			exit(EXIT_FAILURE);
		}
		worker->sockpair[0] = -1;
		ret = run_as_worker(worker);
		if (lttcomm_close_unix_sock(worker->sockpair[1])) {
			PERROR("close");
			ret = -1;
		}
		worker->sockpair[1] = -1;
		LOG(ret ? PRINT_ERR : PRINT_DBG, "run_as worker exiting (ret = %d)", ret);
		exit(ret ? EXIT_FAILURE : EXIT_SUCCESS);
	} else {
		/* Parent */

		/* Just close, no shutdown. */
		if (close(worker->sockpair[1])) {
			PERROR("close");
			ret = -1;
			goto error_fork;
		}
		worker->sockpair[1] = -1;
		worker->pid = pid;

		/* Wait for worker to become ready. */
		readlen = lttcomm_recv_unix_sock(worker->sockpair[0],
				&recvret, sizeof(recvret));
		if (readlen < sizeof(recvret)) {
			ERR("readlen: %zd", readlen);
			PERROR("Error reading response from run_as at creation");
			ret = -1;
			goto error_fork;
		}
		global_worker = worker;
	}
end:
	pthread_mutex_unlock(&worker_lock);
	return ret;

	/* Error handling. */
error_fork:
	for (i = 0; i < 2; i++) {
		if (worker->sockpair[i] < 0) {
			continue;
		}
		if (lttcomm_close_unix_sock(worker->sockpair[i])) {
			PERROR("close");
		}
		worker->sockpair[i] = -1;
	}
error_sock:
	free(worker);
	pthread_mutex_unlock(&worker_lock);
	return ret;
}
/*
 * Return < 0 on error, 0 if OK, 1 on hangup.
 */
static int handle_one_cmd(struct run_as_worker *worker)
{
	int ret = 0;
	struct run_as_data data;
	ssize_t readlen, writelen;
	struct run_as_ret sendret;
	run_as_fct cmd;
	uid_t prev_euid;

	/* Read data */
	readlen = lttcomm_recv_unix_sock(worker->sockpair[1], &data,
			sizeof(data));
	if (readlen == 0) {
		/* hang up */
		ret = 1;
		goto end;
	}
	if (readlen < sizeof(data)) {
		PERROR("lttcomm_recv_unix_sock error");
		ret = -1;
		goto end;
	}

	cmd = run_as_enum_to_fct(data.cmd);
	if (!cmd) {
		ret = -1;
		goto end;
	}

	prev_euid = getuid();
	if (data.gid != getegid()) {
		ret = setegid(data.gid);
		if (ret < 0) {
			PERROR("setegid");
			goto write_return;
		}
	}
	if (data.uid != prev_euid) {
		ret = seteuid(data.uid);
		if (ret < 0) {
			PERROR("seteuid");
			goto write_return;
		}
	}

	/*
	 * Also set umask to 0 for mkdir executable bit.
	 */
	umask(0);
	ret = (*cmd)(&data);

write_return:
	sendret.ret = ret;
	sendret._errno = errno;

	/* send back return value */
	writelen = lttcomm_send_unix_sock(worker->sockpair[1], &sendret,
			sizeof(sendret));
	if (writelen < sizeof(sendret)) {
		PERROR("lttcomm_send_unix_sock error");
		ret = -1;
		goto end;
	}

	ret = do_send_fd(worker, data.cmd, ret);
	if (ret) {
		PERROR("do_send_fd error");
		ret = -1;
		goto end;
	}

	if (seteuid(prev_euid) < 0) {
		PERROR("seteuid");
		ret = -1;
		goto end;
	}

	ret = 0;
end:
	return ret;
}
/*
 * Thread managing the health check socket.
 */
static void *thread_manage_health(void *data)
{
	const bool is_root = (getuid() == 0);
	int sock = -1, new_sock = -1, ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	struct health_comm_msg msg;
	struct health_comm_reply reply;
	/* Thread-specific quit pipe. */
	struct thread_notifiers *notifiers = data;
	const int quit_pipe_read_fd = lttng_pipe_get_readfd(
			notifiers->quit_pipe);

	DBG("[thread] Manage health check started");

	rcu_register_thread();

	/*
	 * Created with a size of two for:
	 *   - client socket
	 *   - thread quit pipe
	 */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error;
	}

	/* Create unix socket */
	sock = lttcomm_create_unix_sock(config.health_unix_sock_path.value);
	if (sock < 0) {
		ERR("Unable to create health check Unix socket");
		goto error;
	}

	if (is_root) {
		/* lttng health client socket path permissions */
		gid_t gid;

		ret = utils_get_group_id(config.tracing_group_name.value, true,
				&gid);
		if (ret) {
			/* Default to root group. */
			gid = 0;
		}

		ret = chown(config.health_unix_sock_path.value, 0, gid);
		if (ret < 0) {
			ERR("Unable to set group on %s",
					config.health_unix_sock_path.value);
			PERROR("chown");
			goto error;
		}

		ret = chmod(config.health_unix_sock_path.value,
				S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
		if (ret < 0) {
			ERR("Unable to set permissions on %s",
					config.health_unix_sock_path.value);
			PERROR("chmod");
			goto error;
		}
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	ret = lttcomm_listen_unix_sock(sock);
	if (ret < 0) {
		goto error;
	}

	ret = lttng_poll_add(&events, quit_pipe_read_fd,
			LPOLLIN | LPOLLERR);
	if (ret < 0) {
		goto error;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
	if (ret < 0) {
		goto error;
	}

	mark_thread_as_ready(notifiers);
	while (1) {
		DBG("Health check ready");

		/* Infinite blocking call, waiting for transmission */
restart:
		ret = lttng_poll_wait(&events, -1);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Event on the registration socket */
			if (pollfd == sock) {
				if (revents & LPOLLIN) {
					continue;
				} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Health socket poll error");
					goto error;
				} else {
					ERR("Unexpected poll events %u for sock %d",
							revents, pollfd);
					goto error;
				}
			} else {
				/* Event on the thread's quit pipe. */
				err = 0;
				goto exit;
			}
		}

		new_sock = lttcomm_accept_unix_sock(sock);
		if (new_sock < 0) {
			goto error;
		}

		/*
		 * Set the CLOEXEC flag. Return code is useless because either way, the
		 * show must go on.
		 */
		(void) utils_set_fd_cloexec(new_sock);

		DBG("Receiving data from client for health...");
		ret = lttcomm_recv_unix_sock(new_sock, (void *) &msg, sizeof(msg));
		if (ret <= 0) {
			DBG("Nothing recv() from client... continuing");
			ret = close(new_sock);
			if (ret) {
				PERROR("close");
			}
			continue;
		}

		rcu_thread_online();

		memset(&reply, 0, sizeof(reply));
		for (i = 0; i < NR_HEALTH_SESSIOND_TYPES; i++) {
			/*
			 * health_check_state returns 0 if health is
			 * bad.
			 */
			if (!health_check_state(health_sessiond, i)) {
				reply.ret_code |= 1ULL << i;
			}
		}

		DBG2("Health check return value %" PRIx64, reply.ret_code);

		ret = lttcomm_send_unix_sock(new_sock, (void *) &reply,
				sizeof(reply));
		if (ret < 0) {
			ERR("Failed to send health data back to client");
		}

		/* End of transmission */
		ret = close(new_sock);
		if (ret) {
			PERROR("close");
		}
	}

exit:
error:
	if (err) {
		ERR("Health error occurred in %s", __func__);
	}
	DBG("Health check thread dying");
	unlink(config.health_unix_sock_path.value);
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	lttng_poll_clean(&events);
	rcu_unregister_thread();
	return NULL;
}