bool setup_named_pipe_socket(const char *pipe_name, struct tevent_context *ev_ctx, struct messaging_context *msg_ctx) { struct dcerpc_ncacn_listen_state *state; struct tevent_fd *fde; int rc; state = talloc(ev_ctx, struct dcerpc_ncacn_listen_state); if (!state) { DEBUG(0, ("Out of memory\n")); return false; } state->ep.name = talloc_strdup(state, pipe_name); if (state->ep.name == NULL) { DEBUG(0, ("Out of memory\n")); goto out; } state->fd = create_named_pipe_socket(pipe_name); if (state->fd == -1) { goto out; } rc = listen(state->fd, 5); if (rc < 0) { DEBUG(0, ("Failed to listen on pipe socket %s: %s\n", pipe_name, strerror(errno))); goto out; } state->ev_ctx = ev_ctx; state->msg_ctx = msg_ctx; DEBUG(10, ("Openened pipe socket fd %d for %s\n", state->fd, pipe_name)); fde = tevent_add_fd(ev_ctx, state, state->fd, TEVENT_FD_READ, named_pipe_listener, state); if (!fde) { DEBUG(0, ("Failed to add event handler!\n")); goto out; } tevent_fd_set_auto_close(fde); return true; out: if (state->fd != -1) { close(state->fd); } TALLOC_FREE(state); return false; }
/* setup the fd used by the queue */ int ctdb_queue_set_fd(struct ctdb_queue *queue, int fd) { queue->fd = fd; talloc_free(queue->fde); queue->fde = NULL; if (fd != -1) { queue->fde = event_add_fd(queue->ctdb->ev, queue, fd, EVENT_FD_READ, queue_io_handler, queue); if (queue->fde == NULL) { return -1; } tevent_fd_set_auto_close(queue->fde); if (queue->out_queue) { EVENT_FD_WRITEABLE(queue->fde); } } return 0; }
/*
 * Fork the smbd scavenger child process (if one is not already running)
 * and wire up the parent/child control socketpair.
 *
 * Returns true on success (or if a scavenger is already running);
 * false on any setup failure.  Must be called from the parent process
 * (asserted below).
 */
static bool smbd_scavenger_start(struct smbd_scavenger_state *state)
{
	struct server_id self = messaging_server_id(state->msg);
	struct tevent_fd *fde = NULL;
	int fds[2];
	int ret;
	uint64_t unique_id;
	bool ok;

	SMB_ASSERT(server_id_equal(&state->parent_id, &self));

	if (smbd_scavenger_running(state)) {
		DEBUG(10, ("scavenger %s already running\n",
			   server_id_str(talloc_tos(),
					 state->scavenger_id)));
		return true;
	}

	if (state->scavenger_id != NULL) {
		/* Stale id from a scavenger that died; discard it. */
		DEBUG(10, ("scavenger zombie %s, cleaning up\n",
			   server_id_str(talloc_tos(),
					 state->scavenger_id)));
		TALLOC_FREE(state->scavenger_id);
	}

	state->scavenger_id = talloc_zero(state, struct server_id);
	if (state->scavenger_id == NULL) {
		DEBUG(2, ("Out of memory\n"));
		goto fail;
	}
	talloc_set_destructor(state->scavenger_id,
			      smbd_scavenger_server_id_destructor);

	/*
	 * The socketpair doubles as a liveness link: each side notices
	 * the other's death via EOF on its end.
	 */
	/* NOTE(review): this DEBUG message lacks a trailing \n. */
	ret = socketpair(AF_UNIX, SOCK_STREAM, 0, fds);
	if (ret == -1) {
		DEBUG(2, ("socketpair failed: %s", strerror(errno)));
		goto fail;
	}

	smb_set_close_on_exec(fds[0]);
	smb_set_close_on_exec(fds[1]);

	unique_id = serverid_get_random_unique_id();

	ret = fork();
	if (ret == -1) {
		int err = errno;
		close(fds[0]);
		close(fds[1]);
		DEBUG(0, ("fork failed: %s", strerror(err)));
		goto fail;
	}

	if (ret == 0) {
		/* child */
		NTSTATUS status;

		close(fds[0]);

		am_parent = NULL;

		set_my_unique_id(unique_id);

		status = reinit_after_fork(state->msg, state->ev, true);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(2, ("reinit_after_fork failed: %s\n",
				  nt_errstr(status)));
			exit_server("reinit_after_fork failed");
			return false;
		}

		prctl_set_comment("smbd-scavenger");

		state->am_scavenger = true;
		*state->scavenger_id = messaging_server_id(state->msg);

		scavenger_setup_sig_term_handler(state->ev);

		serverid_register(*state->scavenger_id, FLAG_MSG_GENERAL);

		/* Tell the parent our server_id over the socketpair. */
		ok = scavenger_say_hello(fds[1], *state->scavenger_id);
		if (!ok) {
			DEBUG(2, ("scavenger_say_hello failed\n"));
			exit_server("scavenger_say_hello failed");
			return false;
		}

		/* EOF on fds[1] means the parent died. */
		fde = tevent_add_fd(state->ev, state->scavenger_id,
				    fds[1], TEVENT_FD_READ,
				    smbd_scavenger_parent_dead, state);
		if (fde == NULL) {
			DEBUG(2, ("tevent_add_fd(smbd_scavenger_parent_dead) "
				  "failed\n"));
			exit_server("tevent_add_fd(smbd_scavenger_parent_dead) "
				    "failed");
			return false;
		}
		tevent_fd_set_auto_close(fde);

		ret = smbd_scavenger_main(state);

		DEBUG(10, ("scavenger ended: %d\n", ret));
		exit_server_cleanly("scavenger ended");
		return false;
	}

	/* parent */
	close(fds[1]);

	/* Wait for the child to report its server_id. */
	ok = scavenger_wait_hello(fds[0], state->scavenger_id);
	if (!ok) {
		close(fds[0]);
		goto fail;
	}

	/* EOF on fds[0] means the scavenger child died. */
	fde = tevent_add_fd(state->ev, state->scavenger_id,
			    fds[0], TEVENT_FD_READ,
			    smbd_scavenger_done, state);
	if (fde == NULL) {
		close(fds[0]);
		goto fail;
	}
	tevent_fd_set_auto_close(fde);

	return true;
fail:
	TALLOC_FREE(state->scavenger_id);
	return false;
}
/*
 * Run a command as a child process, with a timeout.
 *
 * Any stdout/stderr from the child will appear in the Samba logs with
 * the specified log levels.
 *
 * Returns a tevent request, or NULL on fatal setup failure.  Pipe and
 * fork errors are reported through tevent_req_error()/errno.
 */
struct tevent_req *samba_runcmd_send(TALLOC_CTX *mem_ctx,
				     struct tevent_context *ev,
				     struct timeval endtime,
				     int stdout_log_level,
				     int stderr_log_level,
				     const char * const *argv0, ...)
{
	struct tevent_req *req;
	struct samba_runcmd_state *state;
	int p1[2], p2[2], p3[2];	/* stdout, stderr, stdin pipes */
	char **argv;
	va_list ap;

	if (argv0 == NULL) {
		return NULL;
	}

	req = tevent_req_create(mem_ctx, &state,
				struct samba_runcmd_state);
	if (req == NULL) {
		return NULL;
	}

	state->stdout_log_level = stdout_log_level;
	state->stderr_log_level = stderr_log_level;
	state->fd_stdin = -1;

	state->arg0 = talloc_strdup(state, argv0[0]);
	if (tevent_req_nomem(state->arg0, req)) {
		return tevent_req_post(req, ev);
	}

	if (pipe(p1) != 0) {
		tevent_req_error(req, errno);
		return tevent_req_post(req, ev);
	}
	if (pipe(p2) != 0) {
		close(p1[0]);
		close(p1[1]);
		tevent_req_error(req, errno);
		return tevent_req_post(req, ev);
	}
	if (pipe(p3) != 0) {
		close(p1[0]);
		close(p1[1]);
		close(p2[0]);
		close(p2[1]);
		tevent_req_error(req, errno);
		return tevent_req_post(req, ev);
	}

	state->tfork = tfork_create();
	if (state->tfork == NULL) {
		close(p1[0]);
		close(p1[1]);
		close(p2[0]);
		close(p2[1]);
		close(p3[0]);
		close(p3[1]);
		tevent_req_error(req, errno);
		return tevent_req_post(req, ev);
	}
	state->pid = tfork_child_pid(state->tfork);
	if (state->pid != 0) {
		/* the parent */
		close(p1[1]);
		close(p2[1]);
		close(p3[0]);
		state->fd_stdout = p1[0];
		state->fd_stderr = p2[0];
		state->fd_stdin  = p3[1];
		state->fd_status = tfork_event_fd(state->tfork);

		set_blocking(state->fd_stdout, false);
		set_blocking(state->fd_stderr, false);
		set_blocking(state->fd_stdin,  false);
		set_blocking(state->fd_status, false);

		smb_set_close_on_exec(state->fd_stdin);
		smb_set_close_on_exec(state->fd_stdout);
		smb_set_close_on_exec(state->fd_stderr);
		smb_set_close_on_exec(state->fd_status);

		tevent_req_set_cleanup_fn(req, samba_runcmd_cleanup_fn);

		state->fde_stdout = tevent_add_fd(ev, state,
						  state->fd_stdout,
						  TEVENT_FD_READ,
						  samba_runcmd_io_handler,
						  req);
		if (tevent_req_nomem(state->fde_stdout, req)) {
			close(state->fd_stdout);
			close(state->fd_stderr);
			close(state->fd_status);
			return tevent_req_post(req, ev);
		}
		tevent_fd_set_auto_close(state->fde_stdout);

		state->fde_stderr = tevent_add_fd(ev, state,
						  state->fd_stderr,
						  TEVENT_FD_READ,
						  samba_runcmd_io_handler,
						  req);
		/*
		 * BUGFIX: the original checked state->fde_stdout here
		 * (copy-paste), so a failed fde_stderr allocation went
		 * unnoticed.  fd_stdout is now owned by fde_stdout's
		 * auto-close, so only close the fds no fde owns yet.
		 */
		if (tevent_req_nomem(state->fde_stderr, req)) {
			close(state->fd_stderr);
			close(state->fd_status);
			return tevent_req_post(req, ev);
		}
		tevent_fd_set_auto_close(state->fde_stderr);

		state->fde_status = tevent_add_fd(ev, state,
						  state->fd_status,
						  TEVENT_FD_READ,
						  samba_runcmd_io_handler,
						  req);
		/* BUGFIX: same copy-paste — check fde_status, not fde_stdout. */
		if (tevent_req_nomem(state->fde_status, req)) {
			close(state->fd_status);
			return tevent_req_post(req, ev);
		}
		tevent_fd_set_auto_close(state->fde_status);

		if (!timeval_is_zero(&endtime)) {
			tevent_req_set_endtime(req, ev, endtime);
		}

		return req;
	}

	/* the child */
	close(p1[0]);
	close(p2[0]);
	close(p3[1]);
	close(0);
	close(1);
	close(2);

	/* we want to ensure that all of the network sockets we had
	   open are closed */
	tevent_re_initialise(ev);

	/* setup for logging to go to the parents debug log */
	dup2(p3[0], 0);
	dup2(p1[1], 1);
	dup2(p2[1], 2);

	close(p1[1]);
	close(p2[1]);
	close(p3[0]);

	argv = str_list_copy(state, discard_const_p(const char *, argv0));
	if (!argv) {
		fprintf(stderr, "Out of memory in child\n");
		_exit(255);
	}

	/* Append the varargs (NULL-terminated) to the argv list. */
	va_start(ap, argv0);
	while (1) {
		const char **l;
		char *arg = va_arg(ap, char *);
		if (arg == NULL) break;
		l = discard_const_p(const char *, argv);
		l = str_list_add(l, arg);
		if (l == NULL) {
			fprintf(stderr, "Out of memory in child\n");
			_exit(255);
		}
		argv = discard_const_p(char *, l);
	}
	va_end(ap);

	(void)execvp(state->arg0, argv);
	fprintf(stderr, "Failed to exec child - %s\n", strerror(errno));
	_exit(255);
	return NULL;
}
static struct standard_child_state *setup_standard_child_pipe(struct tevent_context *ev, const char *name) { struct standard_child_state *state; int parent_child_pipe[2]; int ret; /* * Prepare a pipe to allow us to know when the child exits, * because it will trigger a read event on this private * pipe. * * We do all this before the accept and fork(), so we can * clean up if it fails. */ state = talloc_zero(ev, struct standard_child_state); if (state == NULL) { return NULL; } if (name == NULL) { name = ""; } state->name = talloc_strdup(state, name); if (state->name == NULL) { TALLOC_FREE(state); return NULL; } ret = pipe(parent_child_pipe); if (ret == -1) { DEBUG(0, ("Failed to create parent-child pipe to handle " "SIGCHLD to track new process for socket\n")); TALLOC_FREE(state); return NULL; } smb_set_close_on_exec(parent_child_pipe[0]); smb_set_close_on_exec(parent_child_pipe[1]); state->from_child_fd = parent_child_pipe[0]; state->to_parent_fd = parent_child_pipe[1]; /* * The basic purpose of calling this handler is to ensure we * call waitpid() and so avoid zombies (now that we no longer * user SIGIGN on for SIGCHLD), but it also allows us to clean * up other resources in the future. */ state->from_child_fde = tevent_add_fd(ev, state, state->from_child_fd, TEVENT_FD_READ, standard_child_pipe_handler, state); if (state->from_child_fde == NULL) { TALLOC_FREE(state); return NULL; } tevent_fd_set_auto_close(state->from_child_fde); return state; }
static bool test_event_fd1(struct torture_context *tctx, const void *test_data) { struct test_event_fd1_state state; ZERO_STRUCT(state); state.tctx = tctx; state.backend = (const char *)test_data; state.ev = tevent_context_init_byname(tctx, state.backend); if (state.ev == NULL) { torture_skip(tctx, talloc_asprintf(tctx, "event backend '%s' not supported\n", state.backend)); return true; } tevent_set_debug_stderr(state.ev); torture_comment(tctx, "backend '%s' - %s\n", state.backend, __FUNCTION__); /* * This tests the following: * * It monitors the state of state.sock[0] * with tevent_fd, but we never read/write on state.sock[0] * while state.sock[1] * is only used to write a few bytes. * * We have a loop: * - we wait only for TEVENT_FD_WRITE on state.sock[0] * - we write 1 byte to state.sock[1] * - we wait only for TEVENT_FD_READ on state.sock[0] * - we disable events on state.sock[0] * - the timer event restarts the loop * Then we close state.sock[1] * We have a loop: * - we wait for TEVENT_FD_READ/WRITE on state.sock[0] * - we try to read 1 byte * - if the read gets an error of returns 0 * we disable the event handler * - the timer finishes the test */ state.sock[0] = -1; state.sock[1] = -1; socketpair(AF_UNIX, SOCK_STREAM, 0, state.sock); state.te = tevent_add_timer(state.ev, state.ev, timeval_current_ofs(0,1000), test_event_fd1_finished, &state); state.fde0 = tevent_add_fd(state.ev, state.ev, state.sock[0], TEVENT_FD_WRITE, test_event_fd1_fde_handler, &state); /* state.fde1 is only used to auto close */ state.fde1 = tevent_add_fd(state.ev, state.ev, state.sock[1], 0, test_event_fd1_fde_handler, &state); tevent_fd_set_auto_close(state.fde0); tevent_fd_set_auto_close(state.fde1); while (!state.finished) { errno = 0; if (tevent_loop_once(state.ev) == -1) { talloc_free(state.ev); torture_fail(tctx, talloc_asprintf(tctx, "Failed event loop %s\n", strerror(errno))); } } talloc_free(state.ev); torture_assert(tctx, state.error == NULL, talloc_asprintf(tctx, "%s", 
state.error)); return true; }
/*
 * Torture test: drive both ends of a socketpair with combined
 * TEVENT_FD_READ|TEVENT_FD_WRITE handlers until both socket buffers
 * fill and then drain again.
 */
static bool test_event_fd2(struct torture_context *tctx,
			   const void *test_data)
{
	struct test_event_fd2_state state;
	int sock[2];
	uint8_t c = 0;

	ZERO_STRUCT(state);
	state.tctx = tctx;
	state.backend = (const char *)test_data;

	state.ev = tevent_context_init_byname(tctx, state.backend);
	if (state.ev == NULL) {
		torture_skip(tctx, talloc_asprintf(tctx,
			     "event backend '%s' not supported\n",
			     state.backend));
		return true;
	}

	tevent_set_debug_stderr(state.ev);
	torture_comment(tctx, "backend '%s' - %s\n",
			state.backend, __FUNCTION__);

	/*
	 * This tests the following
	 *
	 * - We write 1 byte to each socket
	 * - We wait for TEVENT_FD_READ/WRITE on both sockets
	 * - When we get TEVENT_FD_WRITE we write 1 byte
	 *   until both socket buffers are full, which
	 *   means both sockets only get TEVENT_FD_READ.
	 * - Then we read 1 byte until we have consumed
	 *   all bytes the other end has written.
	 */
	sock[0] = -1;
	sock[1] = -1;
	/* NOTE(review): socketpair() result is unchecked here. */
	socketpair(AF_UNIX, SOCK_STREAM, 0, sock);

	/*
	 * the timer should never expire
	 */
	state.te = tevent_add_timer(state.ev, state.ev,
				    timeval_current_ofs(600, 0),
				    test_event_fd2_finished, &state);
	state.sock0.state = &state;
	state.sock0.fd = sock[0];
	state.sock0.fde = tevent_add_fd(state.ev, state.ev,
					state.sock0.fd,
					TEVENT_FD_READ | TEVENT_FD_WRITE,
					test_event_fd2_sock_handler,
					&state.sock0);
	state.sock1.state = &state;
	state.sock1.fd = sock[1];
	state.sock1.fde = tevent_add_fd(state.ev, state.ev,
					state.sock1.fd,
					TEVENT_FD_READ | TEVENT_FD_WRITE,
					test_event_fd2_sock_handler,
					&state.sock1);

	tevent_fd_set_auto_close(state.sock0.fde);
	tevent_fd_set_auto_close(state.sock1.fde);

	/* Prime both directions with one byte each.
	 * NOTE(review): write() results are unchecked. */
	write(state.sock0.fd, &c, 1);
	state.sock0.num_written++;
	write(state.sock1.fd, &c, 1);
	state.sock1.num_written++;

	while (!state.finished) {
		errno = 0;
		if (tevent_loop_once(state.ev) == -1) {
			talloc_free(state.ev);
			torture_fail(tctx, talloc_asprintf(tctx,
				     "Failed event loop %s\n",
				     strerror(errno)));
		}
	}

	talloc_free(state.ev);

	torture_assert(tctx, state.error == NULL,
		       talloc_asprintf(tctx, "%s", state.error));

	return true;
}
/*
 * Torture test: basic event-context behaviour — pipe fd events,
 * timers, and signal events (SA_RESTART / SA_RESETHAND / SA_SIGINFO
 * where available), then verifies the handler counters.
 */
static bool test_event_context(struct torture_context *test,
			       const void *test_data)
{
	struct tevent_context *ev_ctx;
	int fd[2] = { -1, -1 };
	const char *backend = (const char *)test_data;
	int alarm_count=0, info_count=0;
	struct tevent_fd *fde_read;
	struct tevent_fd *fde_read_1;
	struct tevent_fd *fde_write;
	struct tevent_fd *fde_write_1;
#ifdef SA_RESTART
	struct tevent_signal *se1 = NULL;
#endif
#ifdef SA_RESETHAND
	struct tevent_signal *se2 = NULL;
#endif
#ifdef SA_SIGINFO
	struct tevent_signal *se3 = NULL;
#endif
	int finished=0;
	struct timeval t;
	int ret;

	ev_ctx = tevent_context_init_byname(test, backend);
	if (ev_ctx == NULL) {
		torture_comment(test, "event backend '%s' not supported\n",
				backend);
		return true;
	}

	torture_comment(test, "backend '%s' - %s\n",
			backend, __FUNCTION__);

	/* reset globals */
	fde_count = 0;

	/* create a pipe */
	ret = pipe(fd);
	torture_assert_int_equal(test, ret, 0, "pipe failed");

	/*
	 * Two fdes per pipe end: read/write handlers on fd[0] and
	 * fd[1].  Only fde_read (fd[0]) and fde_write (fd[1]) get
	 * auto-close; the *_1 fdes are freed first below so the fds
	 * are not closed twice.
	 */
	fde_read = tevent_add_fd(ev_ctx, ev_ctx, fd[0],
			    TEVENT_FD_READ, fde_handler_read, fd);
	fde_write_1 = tevent_add_fd(ev_ctx, ev_ctx, fd[0],
			    TEVENT_FD_WRITE, fde_handler_write_1, fd);

	fde_write = tevent_add_fd(ev_ctx, ev_ctx, fd[1],
			    TEVENT_FD_WRITE, fde_handler_write, fd);
	fde_read_1 = tevent_add_fd(ev_ctx, ev_ctx, fd[1],
			    TEVENT_FD_READ, fde_handler_read_1, fd);

	tevent_fd_set_auto_close(fde_read);
	tevent_fd_set_auto_close(fde_write);

	/* Ends the main loop after 2 seconds. */
	tevent_add_timer(ev_ctx, ev_ctx, timeval_current_ofs(2,0),
			 finished_handler, &finished);

#ifdef SA_RESTART
	se1 = tevent_add_signal(ev_ctx, ev_ctx, SIGALRM, SA_RESTART,
				count_handler, &alarm_count);
	torture_assert(test, se1 != NULL, "failed to setup se1");
#endif
#ifdef SA_RESETHAND
	se2 = tevent_add_signal(ev_ctx, ev_ctx, SIGALRM, SA_RESETHAND,
				count_handler, &alarm_count);
	torture_assert(test, se2 != NULL, "failed to setup se2");
#endif
#ifdef SA_SIGINFO
	se3 = tevent_add_signal(ev_ctx, ev_ctx, SIGUSR1, SA_SIGINFO,
				count_handler, &info_count);
	torture_assert(test, se3 != NULL, "failed to setup se3");
#endif

	t = timeval_current();
	while (!finished) {
		errno = 0;
		if (tevent_loop_once(ev_ctx) == -1) {
			talloc_free(ev_ctx);
			torture_fail(test, talloc_asprintf(test,
				     "Failed event loop %s\n",
				     strerror(errno)));
		}
	}

	talloc_free(fde_read_1);
	talloc_free(fde_write_1);
	talloc_free(fde_read);
	talloc_free(fde_write);

	/* Drain any remaining signal events before checking counts. */
	while (alarm_count < fde_count+1) {
		if (tevent_loop_once(ev_ctx) == -1) {
			break;
		}
	}

	torture_comment(test, "Got %.2f pipe events/sec\n",
			fde_count/timeval_elapsed(&t));

#ifdef SA_RESTART
	talloc_free(se1);
#endif

	torture_assert_int_equal(test, alarm_count, 1+fde_count,
				 "alarm count mismatch");

#ifdef SA_RESETHAND
	/*
	 * we do not call talloc_free(se2)
	 * because it is already gone,
	 * after triggering the event handler.
	 */
#endif

#ifdef SA_SIGINFO
	talloc_free(se3);
	torture_assert_int_equal(test, info_count, fde_count,
				 "info count mismatch");
#endif

	talloc_free(ev_ctx);

	return true;
}
/* listen on our own address */ int ctdb_tcp_listen(struct ctdb_context *ctdb) { struct ctdb_tcp *ctcp = talloc_get_type(ctdb->private_data, struct ctdb_tcp); ctdb_sock_addr sock; int sock_size; int one = 1; struct tevent_fd *fde; /* we can either auto-bind to the first available address, or we can use a specified address */ if (!ctdb->address) { return ctdb_tcp_listen_automatic(ctdb); } sock = *ctdb->address; switch (sock.sa.sa_family) { case AF_INET: sock_size = sizeof(sock.ip); break; case AF_INET6: sock_size = sizeof(sock.ip6); break; default: DEBUG(DEBUG_ERR, (__location__ " unknown family %u\n", sock.sa.sa_family)); goto failed; } ctcp->listen_fd = socket(sock.sa.sa_family, SOCK_STREAM, IPPROTO_TCP); if (ctcp->listen_fd == -1) { ctdb_set_error(ctdb, "socket failed\n"); return -1; } set_close_on_exec(ctcp->listen_fd); if (setsockopt(ctcp->listen_fd,SOL_SOCKET,SO_REUSEADDR,(char *)&one,sizeof(one)) == -1) { DEBUG(DEBUG_WARNING, ("Failed to set REUSEADDR on fd - %s\n", strerror(errno))); } if (bind(ctcp->listen_fd, (struct sockaddr * )&sock, sock_size) != 0) { DEBUG(DEBUG_ERR,(__location__ " Failed to bind() to socket. %s(%d)\n", strerror(errno), errno)); goto failed; } if (listen(ctcp->listen_fd, 10) == -1) { goto failed; } fde = tevent_add_fd(ctdb->ev, ctcp, ctcp->listen_fd, TEVENT_FD_READ, ctdb_listen_event, ctdb); tevent_fd_set_auto_close(fde); return 0; failed: if (ctcp->listen_fd != -1) { close(ctcp->listen_fd); } ctcp->listen_fd = -1; return -1; }
/* automatically find which address to listen on */ static int ctdb_tcp_listen_automatic(struct ctdb_context *ctdb) { struct ctdb_tcp *ctcp = talloc_get_type(ctdb->private_data, struct ctdb_tcp); ctdb_sock_addr sock; int lock_fd, i; const char *lock_path = CTDB_RUNDIR "/.socket_lock"; struct flock lock; int one = 1; int sock_size; struct tevent_fd *fde; /* If there are no nodes, then it won't be possible to find * the first one. Log a failure and short circuit the whole * process. */ if (ctdb->num_nodes == 0) { DEBUG(DEBUG_CRIT,("No nodes available to attempt bind to - is the nodes file empty?\n")); return -1; } /* in order to ensure that we don't get two nodes with the same adddress, we must make the bind() and listen() calls atomic. The SO_REUSEADDR setsockopt only prevents double binds if the first socket is in LISTEN state */ lock_fd = open(lock_path, O_RDWR|O_CREAT, 0666); if (lock_fd == -1) { DEBUG(DEBUG_CRIT,("Unable to open %s\n", lock_path)); return -1; } lock.l_type = F_WRLCK; lock.l_whence = SEEK_SET; lock.l_start = 0; lock.l_len = 1; lock.l_pid = 0; if (fcntl(lock_fd, F_SETLKW, &lock) != 0) { DEBUG(DEBUG_CRIT,("Unable to lock %s\n", lock_path)); close(lock_fd); return -1; } for (i=0; i < ctdb->num_nodes; i++) { if (ctdb->nodes[i]->flags & NODE_FLAGS_DELETED) { continue; } sock = ctdb->nodes[i]->address; switch (sock.sa.sa_family) { case AF_INET: sock_size = sizeof(sock.ip); break; case AF_INET6: sock_size = sizeof(sock.ip6); break; default: DEBUG(DEBUG_ERR, (__location__ " unknown family %u\n", sock.sa.sa_family)); continue; } ctcp->listen_fd = socket(sock.sa.sa_family, SOCK_STREAM, IPPROTO_TCP); if (ctcp->listen_fd == -1) { ctdb_set_error(ctdb, "socket failed\n"); continue; } set_close_on_exec(ctcp->listen_fd); if (setsockopt(ctcp->listen_fd,SOL_SOCKET,SO_REUSEADDR, (char *)&one,sizeof(one)) == -1) { DEBUG(DEBUG_WARNING, ("Failed to set REUSEADDR on fd - %s\n", strerror(errno))); } if (bind(ctcp->listen_fd, (struct sockaddr * )&sock, sock_size) == 0) { 
break; } if (errno == EADDRNOTAVAIL) { DEBUG(DEBUG_DEBUG,(__location__ " Failed to bind() to socket. %s(%d)\n", strerror(errno), errno)); } else { DEBUG(DEBUG_ERR,(__location__ " Failed to bind() to socket. %s(%d)\n", strerror(errno), errno)); } close(ctcp->listen_fd); ctcp->listen_fd = -1; } if (i == ctdb->num_nodes) { DEBUG(DEBUG_CRIT,("Unable to bind to any of the node addresses - giving up\n")); goto failed; } ctdb->address = talloc_memdup(ctdb, &ctdb->nodes[i]->address, sizeof(ctdb_sock_addr)); if (ctdb->address == NULL) { ctdb_set_error(ctdb, "Out of memory at %s:%d", __FILE__, __LINE__); goto failed; } ctdb->name = talloc_asprintf(ctdb, "%s:%u", ctdb_addr_to_str(ctdb->address), ctdb_addr_to_port(ctdb->address)); if (ctdb->name == NULL) { ctdb_set_error(ctdb, "Out of memory at %s:%d", __FILE__, __LINE__); goto failed; } DEBUG(DEBUG_INFO,("ctdb chose network address %s\n", ctdb->name)); if (listen(ctcp->listen_fd, 10) == -1) { goto failed; } fde = tevent_add_fd(ctdb->ev, ctcp, ctcp->listen_fd, TEVENT_FD_READ, ctdb_listen_event, ctdb); tevent_fd_set_auto_close(fde); close(lock_fd); return 0; failed: close(lock_fd); if (ctcp->listen_fd != -1) { close(ctcp->listen_fd); ctcp->listen_fd = -1; } return -1; }
/*
 * called to create a new server task
 *
 * Forks a pre-fork "master" for the service; the master then forks the
 * configured number of worker processes.  The original (calling)
 * process only registers a pipe handler to notice master death and
 * returns.  This function never returns in the master/worker processes
 * (they exit()).
 */
static void prefork_new_task(
	struct tevent_context *ev,
	struct loadparm_context *lp_ctx,
	const char *service_name,
	void (*new_task_fn)(struct tevent_context *,
			    struct loadparm_context *lp_ctx,
			    struct server_id , void *, void *),
	void *private_data,
	const struct service_details *service_details,
	int from_parent_fd)
{
	pid_t pid;
	struct tfork* t = NULL;
	int i, num_children;

	struct tevent_context *ev2;

	t = tfork_create();
	if (t == NULL) {
		smb_panic("failure in tfork\n");
	}

	pid = tfork_child_pid(t);
	if (pid != 0) {
		struct tevent_fd *fde = NULL;
		int fd = tfork_event_fd(t);

		/* Register a pipe handler that gets called when the prefork
		 * master process terminates.
		 */
		fde = tevent_add_fd(ev, ev, fd, TEVENT_FD_READ,
				    prefork_child_pipe_handler, t);
		if (fde == NULL) {
			smb_panic("Failed to add child pipe handler, "
				  "after fork");
		}
		tevent_fd_set_auto_close(fde);
		return;
	}

	/* From here on we are the pre-fork master process. */
	pid = getpid();
	setproctitle("task[%s] pre-fork master", service_name);

	/*
	 * this will free all the listening sockets and all state that
	 * is not associated with this new connection
	 */
	if (tevent_re_initialise(ev) != 0) {
		smb_panic("Failed to re-initialise tevent after fork");
	}
	prefork_reload_after_fork();
	setup_handlers(ev, from_parent_fd);

	if (service_details->inhibit_pre_fork) {
		/* The task does not support pre-fork: run it directly
		 * in this process and never return. */
		new_task_fn(ev, lp_ctx, cluster_id(pid, 0), private_data, NULL);
		/* The task does not support pre-fork */
		tevent_loop_wait(ev);
		TALLOC_FREE(ev);
		exit(0);
	}

	/*
	 * This is now the child code. We need a completely new event_context
	 * to work with
	 */
	ev2 = s4_event_context_init(NULL);

	/* setup this new connection: process will bind to it's sockets etc
	 *
	 * While we can use ev for the child, which has been re-initialised
	 * above we must run the new task under ev2 otherwise the children
	 * would be listening on the sockets.  Also we don't want the top
	 * level process accepting and handling requests, it's responsible
	 * for monitoring and controlling the child work processes.
	 */
	new_task_fn(ev2, lp_ctx, cluster_id(pid, 0), private_data, NULL);

	{
		int default_children;
		default_children = lpcfg_prefork_children(lp_ctx);
		num_children = lpcfg_parm_int(lp_ctx, NULL, "prefork children",
					      service_name, default_children);
	}
	if (num_children == 0) {
		DBG_WARNING("Number of pre-fork children for %s is zero, "
			    "NO worker processes will be started for %s\n",
			    service_name, service_name);
	}
	DBG_NOTICE("Forking %d %s worker processes\n",
		   num_children, service_name);

	/* We are now free to spawn some worker processes */
	for (i=0; i < num_children; i++) {
		struct tfork* w = NULL;

		w = tfork_create();
		if (w == NULL) {
			smb_panic("failure in tfork\n");
		}

		pid = tfork_child_pid(w);
		if (pid != 0) {
			/* Master: watch for this worker's death via its
			 * tfork event fd. */
			struct tevent_fd *fde = NULL;
			int fd = tfork_event_fd(w);

			fde = tevent_add_fd(ev, ev, fd, TEVENT_FD_READ,
					    prefork_child_pipe_handler, w);
			if (fde == NULL) {
				smb_panic("Failed to add child pipe handler, "
					  "after fork");
			}
			tevent_fd_set_auto_close(fde);
		} else {
			/* Worker process: run the service loop on ev2
			 * and never return. */
			/* tfork uses malloc */
			free(w);

			TALLOC_FREE(ev);
			setproctitle("task[%s] pre-forked worker",
				     service_name);
			prefork_reload_after_fork();
			setup_handlers(ev2, from_parent_fd);
			tevent_loop_wait(ev2);
			talloc_free(ev2);
			exit(0);
		}
	}

	/* Don't listen on the sockets we just gave to the children */
	tevent_loop_wait(ev);
	TALLOC_FREE(ev);
	/* We need to keep ev2 until we're finished for the messaging to work */
	TALLOC_FREE(ev2);
	exit(0);
}
/*
 * Schedule a new lock child process
 * Set up callback handler and timeout handler
 *
 * Picks the next eligible pending lock context, spawns the
 * ctdb_lock_helper via vfork()/execv(), and wires up a result pipe,
 * a 10-second timeout and the lock handler callback.  On any failure
 * the context simply stays on the pending list for a later retry.
 */
static void ctdb_lock_schedule(struct ctdb_context *ctdb)
{
	struct lock_context *lock_ctx, *next_ctx, *active_ctx;
	int ret;
	TALLOC_CTX *tmp_ctx;
	const char *helper = BINDIR "/ctdb_lock_helper";
	static const char *prog = NULL;
	char **args;

	if (prog == NULL) {
		/* Resolve the helper path once; CTDB_LOCK_HELPER
		 * overrides the compiled-in default. */
		const char *t;

		t = getenv("CTDB_LOCK_HELPER");
		if (t != NULL) {
			prog = talloc_strdup(ctdb, t);
		} else {
			prog = talloc_strdup(ctdb, helper);
		}
		CTDB_NO_MEMORY_VOID(ctdb, prog);
	}

	if (ctdb->lock_pending == NULL) {
		return;
	}

	/* Find a lock context with requests */
	lock_ctx = ctdb->lock_pending;
	while (lock_ctx != NULL) {
		next_ctx = lock_ctx->next;
		if (! lock_ctx->req_queue) {
			/* Empty context: remove it and fix the stats. */
			DEBUG(DEBUG_INFO, ("Removing lock context without lock requests\n"));
			DLIST_REMOVE(ctdb->lock_pending, lock_ctx);
			ctdb->lock_num_pending--;
			CTDB_DECREMENT_STAT(ctdb, locks.num_pending);
			if (lock_ctx->ctdb_db) {
				CTDB_DECREMENT_DB_STAT(lock_ctx->ctdb_db,
						       locks.num_pending);
			}
			talloc_free(lock_ctx);
		} else {
			active_ctx = find_lock_context(ctdb->lock_current,
						       lock_ctx->ctdb_db,
						       lock_ctx->key,
						       lock_ctx->priority,
						       lock_ctx->type);
			if (active_ctx == NULL) {
				if (lock_ctx->ctdb_db == NULL ||
				    lock_ctx->ctdb_db->lock_num_current <
					    MAX_LOCK_PROCESSES_PER_DB) {
					/* Found a lock context with lock requests */
					break;
				}
			}

			/* There is already a child waiting for the
			 * same key.  So don't schedule another child
			 * just yet.
			 */
		}
		lock_ctx = next_ctx;
	}

	if (lock_ctx == NULL) {
		return;
	}

	lock_ctx->child = -1;
	/* fd[0] = parent read end, fd[1] = helper write end. */
	ret = pipe(lock_ctx->fd);
	if (ret != 0) {
		DEBUG(DEBUG_ERR, ("Failed to create pipe in ctdb_lock_schedule\n"));
		return;
	}

	set_close_on_exec(lock_ctx->fd[0]);

	/* Create data for child process */
	tmp_ctx = talloc_new(lock_ctx);
	if (tmp_ctx == NULL) {
		DEBUG(DEBUG_ERR, ("Failed to allocate memory for helper args\n"));
		close(lock_ctx->fd[0]);
		close(lock_ctx->fd[1]);
		return;
	}

	/* Create arguments for lock helper */
	args = lock_helper_args(tmp_ctx, lock_ctx, lock_ctx->fd[1]);
	if (args == NULL) {
		DEBUG(DEBUG_ERR, ("Failed to create lock helper args\n"));
		close(lock_ctx->fd[0]);
		close(lock_ctx->fd[1]);
		talloc_free(tmp_ctx);
		return;
	}

	lock_ctx->child = vfork();
	if (lock_ctx->child == (pid_t)-1) {
		DEBUG(DEBUG_ERR, ("Failed to create a child in ctdb_lock_schedule\n"));
		close(lock_ctx->fd[0]);
		close(lock_ctx->fd[1]);
		talloc_free(tmp_ctx);
		return;
	}

	/* Child process */
	if (lock_ctx->child == 0) {
		/* After vfork() the child must only execv()/_exit(). */
		ret = execv(prog, args);
		if (ret < 0) {
			DEBUG(DEBUG_ERR, ("Failed to execute helper %s (%d, %s)\n",
					  prog, errno, strerror(errno)));
		}
		_exit(1);
	}

	/* Parent process */
	ctdb_track_child(ctdb, lock_ctx->child);
	/* Helper's write end belongs to the child now. */
	close(lock_ctx->fd[1]);

	/* Destructor kills/reaps the child and closes fd[0]. */
	talloc_set_destructor(lock_ctx, ctdb_lock_context_destructor);

	talloc_free(tmp_ctx);

	/* Set up timeout handler */
	lock_ctx->ttimer = tevent_add_timer(ctdb->ev,
					    lock_ctx,
					    timeval_current_ofs(10, 0),
					    ctdb_lock_timeout_handler,
					    (void *)lock_ctx);
	if (lock_ctx->ttimer == NULL) {
		/* Roll back: kill the helper, clear the destructor so
		 * it does not double-close, and drop our pipe end. */
		ctdb_kill(ctdb, lock_ctx->child, SIGKILL);
		lock_ctx->child = -1;
		talloc_set_destructor(lock_ctx, NULL);
		close(lock_ctx->fd[0]);
		return;
	}

	/* Set up callback */
	lock_ctx->tfd = tevent_add_fd(ctdb->ev,
				      lock_ctx,
				      lock_ctx->fd[0],
				      EVENT_FD_READ,
				      ctdb_lock_handler,
				      (void *)lock_ctx);
	if (lock_ctx->tfd == NULL) {
		TALLOC_FREE(lock_ctx->ttimer);
		ctdb_kill(ctdb, lock_ctx->child, SIGKILL);
		lock_ctx->child = -1;
		talloc_set_destructor(lock_ctx, NULL);
		close(lock_ctx->fd[0]);
		return;
	}
	/* The fde owns fd[0] from here; it is closed on teardown. */
	tevent_fd_set_auto_close(lock_ctx->tfd);

	/* Move the context from pending to current */
	DLIST_REMOVE(ctdb->lock_pending, lock_ctx);
	ctdb->lock_num_pending--;
	DLIST_ADD_END(ctdb->lock_current, lock_ctx, NULL);
	if (lock_ctx->ctdb_db) {
		lock_ctx->ctdb_db->lock_num_current++;
		CTDB_INCREMENT_STAT(lock_ctx->ctdb, locks.num_current);
		CTDB_INCREMENT_DB_STAT(lock_ctx->ctdb_db, locks.num_current);
	}
}