static void _msg_engine(void) { slurm_addr_t *cli; slurm_fd_t sock; msg_pthread = pthread_self(); slurmd_req(NULL); /* initialize timer */ while (!_shutdown) { if (_reconfig) { verbose("got reconfigure request"); _wait_for_all_threads(5); /* Wait for RPCs to finish */ _reconfigure(); } cli = xmalloc (sizeof (slurm_addr_t)); if ((sock = slurm_accept_msg_conn(conf->lfd, cli)) >= 0) { _handle_connection(sock, cli); continue; } /* * Otherwise, accept() failed. */ xfree (cli); if (errno == EINTR) continue; error("accept: %m"); } verbose("got shutdown request"); slurm_shutdown_msg_engine(conf->lfd); return; }
/*************************************************************************** * Support functions for slurm_allocate_resources_blocking() ***************************************************************************/ static listen_t *_create_allocation_response_socket(char *interface_hostname) { listen_t *listen = NULL; uint16_t *ports; listen = xmalloc(sizeof(listen_t)); if ((ports = slurm_get_srun_port_range())) listen->fd = slurm_init_msg_engine_ports(ports); else listen->fd = slurm_init_msg_engine_port(0); if (listen->fd < 0) { error("slurm_init_msg_engine_port error %m"); return NULL; } if (slurm_get_stream_addr(listen->fd, &listen->address) < 0) { error("slurm_get_stream_addr error %m"); slurm_shutdown_msg_engine(listen->fd); return NULL; } listen->hostname = xstrdup(interface_hostname); /* FIXME - screw it! I can't seem to get the port number through slurm_* functions */ listen->port = ntohs(listen->address.sin_port); fd_set_nonblocking(listen->fd); return listen; }
/* Process incoming RPCs. Meant to execute as a pthread */
extern void *rpc_mgr(void *no_data)
{
	int sockfd, newsockfd;
	int i;
	uint16_t port;
	slurm_addr_t cli_addr;
	slurmdbd_conn_t *conn_arg = NULL;

	master_thread_id = pthread_self();

	/* initialize port for RPCs */
	if ((sockfd = slurm_init_msg_engine_port(get_dbd_port()))
	    == SLURM_SOCKET_ERROR)
		fatal("slurm_init_msg_engine_port error %m");

	slurm_persist_conn_recv_server_init();

	/*
	 * Process incoming RPCs until told to shutdown
	 */
	/* slurm_persist_conn_wait_for_thread_loc() blocks until a thread
	 * slot is available; a negative return also ends the loop. */
	while (!shutdown_time &&
	       (i = slurm_persist_conn_wait_for_thread_loc()) >= 0) {
		/*
		 * accept needed for stream implementation is a no-op in
		 * message implementation that just passes sockfd to newsockfd
		 */
		if ((newsockfd = slurm_accept_msg_conn(sockfd, &cli_addr))
		    == SLURM_SOCKET_ERROR) {
			/* Release the thread slot reserved above before
			 * retrying; EINTR is an expected wakeup. */
			slurm_persist_conn_free_thread_loc(i);
			if (errno != EINTR)
				error("slurm_accept_msg_conn: %m");
			continue;
		}
		fd_set_nonblocking(newsockfd);

		/* Build the persistent-connection descriptor; ownership of
		 * conn_arg (and newsockfd) passes to the receive thread
		 * started below — do not free it here. */
		conn_arg = xmalloc(sizeof(slurmdbd_conn_t));
		conn_arg->conn = xmalloc(sizeof(slurm_persist_conn_t));
		conn_arg->conn->fd = newsockfd;
		conn_arg->conn->flags = PERSIST_FLAG_DBD;
		conn_arg->conn->callback_proc = proc_req;
		conn_arg->conn->callback_fini = _connection_fini_callback;
		conn_arg->conn->shutdown = &shutdown_time;
		conn_arg->conn->version = SLURM_MIN_PROTOCOL_VERSION;
		/* NOTE(review): 16 bytes only fits a dotted-quad IPv4
		 * address ("255.255.255.255" + NUL); an IPv6 peer address
		 * would be truncated here — confirm whether IPv6 clients
		 * are possible on this port. */
		conn_arg->conn->rem_host = xmalloc_nz(sizeof(char) * 16);
		/* Don't fill in the rem_port here.  It will be filled in
		 * later if it is a slurmctld connection. */
		slurm_get_ip_str(&cli_addr, &port,
				 conn_arg->conn->rem_host, sizeof(char) * 16);
		slurm_persist_conn_recv_thread_init(
			conn_arg->conn, i, conn_arg);
	}

	debug("rpc_mgr shutting down");
	(void) slurm_shutdown_msg_engine(sockfd);
	pthread_exit((void *) 0);
	return NULL;
}
/* Finalization processing */
/*
 * Tear down the PMI message engine: close the listening descriptor
 * (if one was opened) and clear the cached srun port.
 */
void slurm_pmi_finalize(void)
{
	if (pmi_fd >= 0) {
		(void) slurm_shutdown_msg_engine(pmi_fd);
		pmi_fd = -1;	/* mark descriptor as closed */
	}
	srun_port = 0;
}
/*
 * Shut down the event message engine if it is open, and mark the
 * descriptor invalid so a second call is a no-op.
 */
static void _close_fd(void)
{
	if (event_fd != -1) {
		(void) slurm_shutdown_msg_engine(event_fd);
		event_fd = -1;
	}
}
/*
 * Close and free a listen_t created by
 * _create_allocation_response_socket().  Releases the socket, the
 * hostname string, and the structure itself.
 */
static void _destroy_allocation_response_socket(listen_t *listen)
{
	xassert(listen != NULL);

	slurm_shutdown_msg_engine(listen->fd);
	/* xfree() is NULL-safe (as used unguarded elsewhere in this
	 * codebase), so the former "if (listen->hostname)" guard was
	 * redundant and has been dropped. */
	xfree(listen->hostname);
	xfree(listen);
}
/*****************************************************************************\ * message hander thread \*****************************************************************************/ static void *_msg_thread(void *no_data) { slurm_fd_t sock_fd = -1, new_fd; slurm_addr_t cli_addr; char *msg; int i; /* If JobSubmitDynAllocPort is already taken, keep trying to open it * once per minute. Slurmctld will continue to function * during this interval even if nothing can be scheduled. */ for (i=0; (!thread_shutdown); i++) { if (i > 0) sleep(60); sock_fd = slurm_init_msg_engine_port(sched_port); if (sock_fd != SLURM_SOCKET_ERROR) break; error("dynalloc: slurm_init_msg_engine_port %u %m", sched_port); error("dynalloc: Unable to communicate with ORTE RAS"); } /* Process incoming RPCs until told to shutdown */ while (!thread_shutdown) { if ((new_fd = slurm_accept_msg_conn(sock_fd, &cli_addr)) == SLURM_SOCKET_ERROR) { if (errno != EINTR) error("dyalloc: slurm_accept_msg_conn %m"); continue; } if (thread_shutdown) { close(new_fd); break; } err_code = 0; err_msg = ""; msg = _recv_msg(new_fd); if (msg) { _proc_msg(new_fd, msg); xfree(msg); } slurm_close_accepted_conn(new_fd); } verbose("dynalloc: message engine shutdown"); if (sock_fd > 0) (void) slurm_shutdown_msg_engine(sock_fd); pthread_exit((void *) 0); return NULL; }
static void *_msg_thread(void *no_data) { int sock_fd = -1, new_fd; slurm_addr_t cli_addr; char *msg; int i; /* If Port is already taken, keep trying to open it 10 secs */ for (i = 0; (!thread_shutdown); i++) { if (i > 0) sleep(10); sock_fd = slurm_init_msg_engine_port(nonstop_comm_port); if (sock_fd != SLURM_SOCKET_ERROR) break; error("slurmctld/nonstop: can not open port: %hu %m", nonstop_comm_port); } /* Process incoming RPCs until told to shutdown */ while (!thread_shutdown) { new_fd = slurm_accept_msg_conn(sock_fd, &cli_addr); if (new_fd == SLURM_SOCKET_ERROR) { if (errno != EINTR) { info("slurmctld/nonstop: " "slurm_accept_msg_conn %m"); } continue; } if (thread_shutdown) { close(new_fd); break; } /* It would be nice to create a pthread for each new * RPC, but that leaks memory on some systems when * done from a plugin. Alternately, we could maintain * a pool of pthreads and reuse them. */ msg = _recv_msg(new_fd); if (msg) { _proc_msg(new_fd, msg, cli_addr); xfree(msg); } slurm_close(new_fd); } debug("slurmctld/nonstop: message engine shutdown"); if (sock_fd > 0) (void) slurm_shutdown_msg_engine(sock_fd); pthread_exit((void *) 0); return NULL; }
/*************************************************************************** * Support functions for slurm_allocate_resources_blocking() ***************************************************************************/ static listen_t *_create_allocation_response_socket(char *interface_hostname) { listen_t *listen = NULL; listen = xmalloc(sizeof(listen_t)); /* port "0" lets the operating system pick any port */ if ((listen->fd = slurm_init_msg_engine_port(0)) < 0) { error("slurm_init_msg_engine_port error %m"); return NULL; } if (slurm_get_stream_addr(listen->fd, &listen->address) < 0) { error("slurm_get_stream_addr error %m"); slurm_shutdown_msg_engine(listen->fd); return NULL; } listen->hostname = xstrdup(interface_hostname); /* FIXME - screw it! I can't seem to get the port number through slurm_* functions */ listen->port = ntohs(listen->address.sin_port); fd_set_nonblocking(listen->fd); return listen; }
/*****************************************************************************\
 * message handler thread
\*****************************************************************************/
/* Wiki/Moab scheduler RPC listener.  Reads SchedulerPort from the
 * config, waits for slurmctld configuration to finish loading, then
 * accepts and services Moab requests until thread_shutdown is set. */
static void *_msg_thread(void *no_data)
{
	slurm_fd_t sock_fd = -1, new_fd;
	slurm_addr_t cli_addr;
	char *msg;
	slurm_ctl_conf_t *conf;
	int i;
	/* Locks: Write configuration, job, node, and partition */
	slurmctld_lock_t config_write_lock = {
		WRITE_LOCK, WRITE_LOCK, WRITE_LOCK, WRITE_LOCK };

	conf = slurm_conf_lock();
	sched_port = conf->schedport;
	slurm_conf_unlock();

	/* Wait until configuration is completely loaded */
	/* Acquiring then immediately releasing the write locks serves
	 * purely as a barrier: it cannot succeed until startup is done. */
	lock_slurmctld(config_write_lock);
	unlock_slurmctld(config_write_lock);

	/* If SchedulerPort is already taken, keep trying to open it
	 * once per minute.  Slurmctld will continue to function
	 * during this interval even if nothing can be scheduled. */
	for (i=0; (!thread_shutdown); i++) {
		if (i > 0)
			sleep(60);
		sock_fd = slurm_init_msg_engine_port(sched_port);
		if (sock_fd != SLURM_SOCKET_ERROR)
			break;
		error("wiki: slurm_init_msg_engine_port %u %m",
		      sched_port);
		error("wiki: Unable to communicate with Moab");
	}

	/* Process incoming RPCs until told to shutdown */
	while (!thread_shutdown) {
		if ((new_fd = slurm_accept_msg_conn(sock_fd, &cli_addr))
		    == SLURM_SOCKET_ERROR) {
			if (errno != EINTR)
				error("wiki: slurm_accept_msg_conn %m");
			continue;
		}
		if (thread_shutdown) {
			close(new_fd);
			break;
		}
		/* It would be nice to create a pthread for each new
		 * RPC, but that leaks memory on some systems when
		 * done from a plugin.
		 * FIXME: Maintain a pool of and reuse them. */
		/* err_code/err_msg appear to be per-RPC reporting globals
		 * reset before each request — single-threaded here, so no
		 * race; NOTE(review): confirm _proc_msg() sets them. */
		err_code = 0;
		err_msg = "";
		msg = _recv_msg(new_fd);
		if (msg) {
			_proc_msg(new_fd, msg);
			xfree(msg);
		}
		slurm_close_accepted_conn(new_fd);
	}
	if (sock_fd > 0)
		(void) slurm_shutdown_msg_engine(sock_fd);
	pthread_exit((void *) 0);
	return NULL;
}
/* Process incoming RPCs. Meant to execute as a pthread */ extern void *rpc_mgr(void *no_data) { pthread_attr_t thread_attr_rpc_req; slurm_fd_t sockfd, newsockfd; int i, retry_cnt, sigarray[] = {SIGUSR1, 0}; slurm_addr_t cli_addr; slurmdbd_conn_t *conn_arg = NULL; slurm_mutex_lock(&thread_count_lock); master_thread_id = pthread_self(); slurm_mutex_unlock(&thread_count_lock); (void) pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL); (void) pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); /* threads to process individual RPC's are detached */ slurm_attr_init(&thread_attr_rpc_req); if (pthread_attr_setdetachstate (&thread_attr_rpc_req, PTHREAD_CREATE_DETACHED)) fatal("pthread_attr_setdetachstate %m"); /* initialize port for RPCs */ if ((sockfd = slurm_init_msg_engine_port(get_dbd_port())) == SLURM_SOCKET_ERROR) fatal("slurm_init_msg_engine_port error %m"); /* Prepare to catch SIGUSR1 to interrupt accept(). * This signal is generated by the slurmdbd signal * handler thread upon receipt of SIGABRT, SIGINT, * or SIGTERM. That thread does all processing of * all signals. 
*/ xsignal(SIGUSR1, _sig_handler); xsignal_unblock(sigarray); /* * Process incoming RPCs until told to shutdown */ while ((i = _wait_for_server_thread()) >= 0) { /* * accept needed for stream implementation is a no-op in * message implementation that just passes sockfd to newsockfd */ if ((newsockfd = slurm_accept_msg_conn(sockfd, &cli_addr)) == SLURM_SOCKET_ERROR) { _free_server_thread((pthread_t) 0); if (errno != EINTR) error("slurm_accept_msg_conn: %m"); continue; } fd_set_nonblocking(newsockfd); conn_arg = xmalloc(sizeof(slurmdbd_conn_t)); conn_arg->newsockfd = newsockfd; slurm_get_ip_str(&cli_addr, &conn_arg->orig_port, conn_arg->ip, sizeof(conn_arg->ip)); retry_cnt = 0; while (pthread_create(&slave_thread_id[i], &thread_attr_rpc_req, _service_connection, (void *) conn_arg)) { if (retry_cnt > 0) { error("pthread_create failure, " "aborting RPC: %m"); close(newsockfd); break; } error("pthread_create failure: %m"); retry_cnt++; usleep(1000); /* retry in 1 msec */ } } debug3("rpc_mgr shutting down"); slurm_attr_destroy(&thread_attr_rpc_req); (void) slurm_shutdown_msg_engine(sockfd); _wait_for_thread_fini(); pthread_exit((void *) 0); return NULL; }