/* ------------------ */
/*
 * SIGTERM handler: log why we are going down, then run the common
 * daemon shutdown path. Any other signal routed here is unexpected
 * and logged as an error before exiting anyway.
 */
static void sigterm_handler(int sig)
{
    if (sig == SIGTERM) {
        LOG(log_info, logtype_afpd, "shutting down on signal %d", sig );
    } else {
        LOG(log_error, logtype_afpd, "unexpected signal: %d", sig);
    }
    daemon_exit(0);
}
/*
 * Orderly shutdown of the shairport process. Idempotent: the
 * shutting_down flag makes a second call a no-op, so the teardown
 * sequence (mDNS deregistration, RTSP stream stop, audio output
 * deinit) runs at most once before the process exits.
 */
void shairport_shutdown(int retval)
{
    if (shutting_down)
        return;
    shutting_down = 1;

    print_log(stdout, "Shutting down...\n");

    mdns_unregister();
    rtsp_shutdown_stream();
    if (config.output != NULL)
        config.output->deinit();

    /* No-op unless we are actually running as a daemon. */
    daemon_exit();

    exit(retval);
}
/* ------------------ */ int main(int argc, char *argv[]) { char volpath[MAXPATHLEN + 1]; int len, actual_len; pid_t pid; int status; char *dbdpn = _PATH_CNID_DBD; char *host = DEFAULTHOST; char *port = DEFAULTPORT; int i; int cc; uid_t uid = 0; gid_t gid = 0; int err = 0; int debug = 0; int ret; char *loglevel = NULL; char *logfile = NULL; sigset_t set; struct volinfo *volinfo; set_processname("cnid_metad"); while (( cc = getopt( argc, argv, "ds:p:h:u:g:l:f:")) != -1 ) { switch (cc) { case 'd': debug = 1; break; case 'h': host = strdup(optarg); break; case 'u': uid = user_to_uid (optarg); if (!uid) { LOG(log_error, logtype_cnid, "main: bad user %s", optarg); err++; } break; case 'g': gid =group_to_gid (optarg); if (!gid) { LOG(log_error, logtype_cnid, "main: bad group %s", optarg); err++; } break; case 'p': port = strdup(optarg); break; case 's': dbdpn = strdup(optarg); break; case 'l': loglevel = strdup(optarg); break; case 'f': logfile = strdup(optarg); break; default: err++; break; } } if (loglevel) { strlcpy(logconfig + 8, loglevel, 13); free(loglevel); strcat(logconfig, " "); } if (logfile) { strlcat(logconfig, logfile, MAXPATHLEN); free(logfile); } setuplog(logconfig); if (err) { LOG(log_error, logtype_cnid, "main: bad arguments"); daemon_exit(1); } /* Check PID lockfile and become a daemon */ switch(server_lock("cnid_metad", _PATH_CNID_METAD_LOCK, debug)) { case -1: /* error */ daemon_exit(EXITERR_SYS); case 0: /* child */ break; default: /* server */ exit(0); } if ((srvfd = tsockfd_create(host, port, 10)) < 0) daemon_exit(1); /* switch uid/gid */ if (uid || gid) { LOG(log_debug, logtype_cnid, "Setting uid/gid to %i/%i", uid, gid); if (gid) { if (SWITCH_TO_GID(gid) < 0) { LOG(log_info, logtype_cnid, "unable to switch to group %d", gid); daemon_exit(1); } } if (uid) { if (SWITCH_TO_UID(uid) < 0) { LOG(log_info, logtype_cnid, "unable to switch to user %d", uid); daemon_exit(1); } } } set_signal(); sigemptyset(&set); sigprocmask(SIG_SETMASK, NULL, &set); 
sigdelset(&set, SIGCHLD); while (1) { rqstfd = usockfd_check(srvfd, &set); /* Collect zombie processes and log what happened to them */ if (sigchild) while ((pid = waitpid(-1, &status, WNOHANG)) > 0) { for (i = 0; i < MAXVOLS; i++) { if (srv[i].pid == pid) { srv[i].pid = 0; close(srv[i].control_fd); break; } } if (WIFEXITED(status)) { LOG(log_info, logtype_cnid, "cnid_dbd pid %i exited with exit code %i", pid, WEXITSTATUS(status)); } else if (WIFSIGNALED(status)) { LOG(log_info, logtype_cnid, "cnid_dbd pid %i exited with signal %i", pid, WTERMSIG(status)); } sigchild = 0; } if (rqstfd <= 0) continue; ret = readt(rqstfd, &len, sizeof(int), 1, 4); if (!ret) { /* already close */ goto loop_end; } else if (ret < 0) { LOG(log_severe, logtype_cnid, "error read: %s", strerror(errno)); goto loop_end; } else if (ret != sizeof(int)) { LOG(log_error, logtype_cnid, "short read: got %d", ret); goto loop_end; } /* * checks for buffer overruns. The client libatalk side does it too * before handing the dir path over but who trusts clients? */ if (!len || len +DBHOMELEN +2 > MAXPATHLEN) { LOG(log_error, logtype_cnid, "wrong len parameter: %d", len); goto loop_end; } actual_len = readt(rqstfd, volpath, len, 1, 5); if (actual_len < 0) { LOG(log_severe, logtype_cnid, "Read(2) error : %s", strerror(errno)); goto loop_end; } if (actual_len != len) { LOG(log_error, logtype_cnid, "error/short read (dir): %s", strerror(errno)); goto loop_end; } volpath[len] = '\0'; /* Load .volinfo file */ if ((volinfo = allocvolinfo(volpath)) == NULL) { LOG(log_severe, logtype_cnid, "allocvolinfo(\"%s\"): %s", volpath, strerror(errno)); goto loop_end; } if (set_dbdir(volinfo->v_dbpath) < 0) { goto loop_end; } maybe_start_dbd(dbdpn, volinfo); (void)closevolinfo(volinfo); loop_end: close(rqstfd); } }
/* ----------------------- */ static void set_signal(void) { struct sigaction sv; sigset_t set; memset(&sv, 0, sizeof(sv)); /* Catch SIGCHLD */ sv.sa_handler = catch_child; sv.sa_flags = SA_NOCLDSTOP; sigemptyset(&sv.sa_mask); if (sigaction(SIGCHLD, &sv, NULL) < 0) { LOG(log_error, logtype_cnid, "cnid_metad: sigaction: %s", strerror(errno)); daemon_exit(EXITERR_SYS); } /* Catch SIGTERM */ sv.sa_handler = sigterm_handler; sigfillset(&sv.sa_mask ); if (sigaction(SIGTERM, &sv, NULL ) < 0 ) { LOG(log_error, logtype_afpd, "sigaction: %s", strerror(errno) ); daemon_exit(EXITERR_SYS); } /* Ignore the rest */ sv.sa_handler = SIG_IGN; sigemptyset(&sv.sa_mask ); if (sigaction(SIGALRM, &sv, NULL ) < 0 ) { LOG(log_error, logtype_afpd, "sigaction: %s", strerror(errno) ); daemon_exit(EXITERR_SYS); } sv.sa_handler = SIG_IGN; sigemptyset(&sv.sa_mask ); if (sigaction(SIGHUP, &sv, NULL ) < 0 ) { LOG(log_error, logtype_afpd, "sigaction: %s", strerror(errno) ); daemon_exit(EXITERR_SYS); } sv.sa_handler = SIG_IGN; sigemptyset(&sv.sa_mask ); if (sigaction(SIGUSR1, &sv, NULL ) < 0 ) { LOG(log_error, logtype_afpd, "sigaction: %s", strerror(errno) ); daemon_exit(EXITERR_SYS); } sv.sa_handler = SIG_IGN; sigemptyset(&sv.sa_mask ); if (sigaction(SIGUSR2, &sv, NULL ) < 0 ) { LOG(log_error, logtype_afpd, "sigaction: %s", strerror(errno) ); daemon_exit(EXITERR_SYS); } sv.sa_handler = SIG_IGN; sigemptyset(&sv.sa_mask ); if (sigaction(SIGPIPE, &sv, NULL ) < 0 ) { LOG(log_error, logtype_afpd, "sigaction: %s", strerror(errno) ); daemon_exit(EXITERR_SYS); } /* block everywhere but in pselect */ sigemptyset(&set); sigaddset(&set, SIGCHLD); sigprocmask(SIG_BLOCK, &set, NULL); }
/* -------------------- */
/*
 * Ensure a cnid_dbd child is serving the volume described by volinfo,
 * then pass the pending client fd (global rqstfd) to it.
 *
 * If a child already exists for the volume, just send the fd over its
 * control socket. Otherwise claim (or reuse) a slot in srv[], enforce a
 * respawn rate limit (at most MAXSPAWN forks per TESTTIME-second
 * window), and fork/exec a new cnid_dbd.
 *
 * Returns 0 on success, -1 on any failure (no slot, respawning too
 * fast, socketpair/fork/send_fd error).
 */
static int maybe_start_dbd(char *dbdpn, struct volinfo *volinfo)
{
    pid_t pid;
    struct server *up;
    int sv[2];
    int i;
    time_t t;
    char buf1[8];   /* child's end of the control socketpair, as text argv */
    char buf2[8];   /* client request fd, as text argv */
    char *volpath = volinfo->v_path;

    LOG(log_debug, logtype_cnid, "maybe_start_dbd: Volume: \"%s\"", volpath);

    /* Look up an existing slot for this volume (NOTE(review): lookup
     * semantics live in test_usockfn(), not visible here). */
    up = test_usockfn(volinfo);
    if (up && up->pid) {
        /* we already have a process, send our fd */
        if (send_fd(up->control_fd, rqstfd) < 0) {
            /* FIXME */
            return -1;
        }
        return 0;
    }

    LOG(log_maxdebug, logtype_cnid, "maybe_start_dbd: no cnid_dbd for that volume yet");

    time(&t);
    if (!up) {
        /* find an empty slot */
        for (i = 0; i < MAXVOLS; i++) {
            if (srv[i].volinfo == NULL) {
                up = &srv[i];
                up->volinfo = volinfo;
                retainvolinfo(volinfo);   /* slot keeps a reference to the volinfo */
                up->tm = t;               /* start of this slot's respawn window */
                up->count = 0;
                break;
            }
        }
        if (!up) {
            LOG(log_error, logtype_cnid, "no free slot for cnid_dbd child. Configured maximum: %d. Do you have so many volumes?", MAXVOLS);
            return -1;
        }
    } else {
        /* we have a slot but no process, check for respawn too fast */
        if ( (t < (up->tm + TESTTIME)) /* We're in the respawn time window */
             &&
             (up->count > MAXSPAWN) ) { /* ...and already tried to fork too often */
            LOG(log_maxdebug, logtype_cnid, "maybe_start_dbd: respawn too fast just exiting");
            return -1; /* just exit, dont sleep, because we might have work to do for another client */
        }
        if ( t >= (up->tm + TESTTIME) ) { /* out of respawn too fast windows reset the count */
            LOG(log_maxdebug, logtype_cnid, "maybe_start_dbd: respawn window ended");
            up->tm = t;
            up->count = 0;
        }
        up->count++;
        LOG(log_maxdebug, logtype_cnid, "maybe_start_dbd: respawn count now is: %u", up->count);
        if (up->count > MAXSPAWN) {
            /* We spawned too fast. From now until the first time we tried + TESTTIME seconds we will just return -1 above */
            LOG(log_maxdebug, logtype_cnid, "maybe_start_dbd: reached MAXSPAWN threshhold");
        }
    }

    /*
       Create socketpair for comm between parent and child.
       We use it to pass fds from connecting afpd processes to our
       cnid_dbd child via fd passing.
    */
    if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) < 0) {
        LOG(log_error, logtype_cnid, "error in socketpair: %s", strerror(errno));
        return -1;
    }

    if ((pid = fork()) < 0) {
        LOG(log_error, logtype_cnid, "error in fork: %s", strerror(errno));
        return -1;
    }
    if (pid == 0) {
        int ret;
        /*
         *  Child. Close descriptors and start the daemon. If it fails
         *  just log it. The client process will fail connecting
         *  afterwards anyway.
         */
        close(srvfd);
        close(sv[0]);   /* parent keeps sv[0]; child talks on sv[1] */

        /* don't inherit control sockets of the other children */
        for (i = 0; i < MAXVOLS; i++) {
            if (srv[i].pid && up != &srv[i]) {
                close(srv[i].control_fd);
            }
        }
        /* fds are passed to cnid_dbd as decimal strings on its argv */
        sprintf(buf1, "%i", sv[1]);
        sprintf(buf2, "%i", rqstfd);
        if (up->count == MAXSPAWN) {
            /* there's a pb with the db inform child
             * it will run recover, delete the db whatever
             */
            LOG(log_error, logtype_cnid, "try with -d %s", up->volinfo->v_path);
            ret = execlp(dbdpn, dbdpn, "-d", volpath, buf1, buf2, logconfig, NULL);
        } else {
            ret = execlp(dbdpn, dbdpn, volpath, buf1, buf2, logconfig, NULL);
        }
        /* Yikes! We're still here, so exec failed... */
        LOG(log_error, logtype_cnid, "Fatal error in exec: %s", strerror(errno));
        daemon_exit(0);
    }
    /*
     *  Parent.
     */
    up->pid = pid;
    close(sv[1]);
    up->control_fd = sv[0];
    return 0;
}
/**
 * The main routine for the clms daemon.
 *
 * Initializes the CLMS control block, installs the SIGTERM selection
 * object, then runs a poll() loop dispatching: termination, AMF (or the
 * SIGUSR1 bootstrap object until AMF is initialized), MBCSv
 * checkpointing, the mailbox, IMM OI callbacks and (optionally) PLM.
 * A 500 ms poll timeout is used only while IMM runtime-attribute
 * updates are pending retry. Reaching "done" always logs a failure and
 * exits non-zero; normal termination happens via daemon_exit().
 *
 * @param argc
 * @param argv
 *
 * @return int (never returns success; exits the process)
 */
int main(int argc, char *argv[])
{
    NCS_SEL_OBJ mbx_fd;
    SaAisErrorT error = SA_AIS_OK;
    uint32_t rc;
    osaf_cluster = NULL;
    int term_fd;
    int timeout = -1;   /* -1 = block in poll() unless RTU retries are pending */

    daemonize(argc, argv);

    if (clms_init() != NCSCC_RC_SUCCESS) {
        LOG_ER("clms_init failed");
        goto done;
    }

    mbx_fd = ncs_ipc_get_sel_obj(&clms_cb->mbx);
    daemon_sigterm_install(&term_fd);

    /* Set up all file descriptors to listen to */
    fds[FD_TERM].fd = term_fd;
    fds[FD_TERM].events = POLLIN;
    /* Until AMF is up under NID, FD_AMF watches the SIGUSR1 selection object */
    fds[FD_AMF].fd = clms_cb->nid_started ? usr1_sel_obj.rmv_obj : clms_cb->amf_sel_obj;
    fds[FD_AMF].events = POLLIN;
    fds[FD_MBCSV].fd = clms_cb->mbcsv_sel_obj;
    fds[FD_MBCSV].events = POLLIN;
    fds[FD_MBX].fd = mbx_fd.rmv_obj;
    fds[FD_MBX].events = POLLIN;
    fds[FD_IMM].fd = clms_cb->imm_sel_obj;
    fds[FD_IMM].events = POLLIN;

#ifdef ENABLE_AIS_PLM
    fds[FD_PLM].fd = clms_cb->plm_sel_obj;
    fds[FD_PLM].events = POLLIN;
#endif

    while (1) {
        if (clms_cb->rtu_pending == true) {
            TRACE("There is an IMM task to be tried again. setting poll time out to 500");
            timeout = 500;
        } else {
            timeout = -1;
        }

        /* Only poll the IMM fd while we hold a valid OI handle and the
         * implementer is set; otherwise shrink nfds to skip FD_IMM. */
        if ((clms_cb->immOiHandle != 0) && (clms_cb->is_impl_set == true)) {
            fds[FD_IMM].fd = clms_cb->imm_sel_obj;
            fds[FD_IMM].events = POLLIN;
            nfds = NUM_FD;
        } else {
            nfds = NUM_FD - 1;
        }

        int ret = poll(fds, nfds, timeout);

        if (ret == -1) {
            if (errno == EINTR)
                continue;
            LOG_ER("poll failed - %s", strerror(errno));
            break;
        }

        if (ret == 0) {
            /* Process any/all pending RTAttribute updates to IMM */
            TRACE("poll time out processing pending updates");
            clms_retry_pending_rtupdates();
            continue;
        }

        if (fds[FD_TERM].revents & POLLIN) {
            daemon_exit();
        }

        if (fds[FD_AMF].revents & POLLIN) {
            if (clms_cb->amf_hdl != 0) {
                if ((error = saAmfDispatch(clms_cb->amf_hdl, SA_DISPATCH_ALL)) != SA_AIS_OK) {
                    LOG_ER("saAmfDispatch failed: %u", error);
                    break;
                }
            } else {
                /* First event under NID is SIGUSR1: tear down the
                 * bootstrap object and initialize AMF for real. */
                TRACE("SIGUSR1 event rec");
                ncs_sel_obj_rmv_ind(usr1_sel_obj, true, true);
                ncs_sel_obj_destroy(usr1_sel_obj);

                if (clms_amf_init(clms_cb) != NCSCC_RC_SUCCESS) {
                    LOG_ER("AMF Initialization failed");
                    break;
                }

                TRACE("AMF Initialization SUCCESS......");
                fds[FD_AMF].fd = clms_cb->amf_sel_obj;
            }
        }

        if (fds[FD_MBCSV].revents & POLLIN) {
            if ((rc = clms_mbcsv_dispatch(clms_cb->mbcsv_hdl)) != NCSCC_RC_SUCCESS) {
                LOG_ER("MBCSv Dispatch Failed");
                break;
            }
        }

        if (fds[FD_MBX].revents & POLLIN) {
            clms_process_mbx(&clms_cb->mbx);
        }

#ifdef ENABLE_AIS_PLM
        /*Incase the Immnd restart is not supported fully,have to reint imm - TO Be Done */
        if (clms_cb->reg_with_plm == SA_TRUE){
            if (fds[FD_PLM].revents & POLLIN) {
                if ((error = saPlmDispatch(clms_cb->plm_hdl, SA_DISPATCH_ALL)) != SA_AIS_OK) {
                    LOG_ER("saPlmDispatch FAILED: %u", error);
                    break;
                }
            }
        }
#endif

        if (clms_cb->immOiHandle && fds[FD_IMM].revents & POLLIN) {
            if ((error = saImmOiDispatch(clms_cb->immOiHandle, SA_DISPATCH_ALL)) != SA_AIS_OK) {
                if (error == SA_AIS_ERR_BAD_HANDLE) {
                    TRACE("main :saImmOiDispatch returned BAD_HANDLE");

                    /*
                     * Invalidate the IMM OI handle, this info is used in
                     * other locations. E.g. giving TRY_AGAIN responses to
                     * a create and close app stream requests. That is
                     * needed since the IMM OI is used in context of these
                     * functions.
                     *
                     * Also closing the handle. Finalize is ok with a bad
                     * handle that is bad because it is stale and this
                     * actually clears the handle from internal agent
                     * structures. In any case we ignore the return value
                     * from Finalize here.
                     */
                    saImmOiFinalize(clms_cb->immOiHandle);

                    clms_cb->immOiHandle = 0;
                    clms_cb->is_impl_set = false;

                    /* Initiate IMM reinitializtion in the background */
                    clm_imm_reinit_bg(clms_cb);
                } else if (error != SA_AIS_OK) {
                    LOG_ER("saImmOiDispatch FAILED: %u", error);
                    break;
                }
            }
        }

        /* Retry any pending updates */
        if (clms_cb->rtu_pending == true)
            clms_retry_pending_rtupdates();
    } /* End while (1) */

done:
    LOG_ER("Failed, exiting...");
    TRACE_LEAVE();
    exit(1);
}
/****************************************************************************
 * Name          : glnd_main_process
 *
 * Description   : This is the function which is given as a input to the
 *                 GLND task.
 *                 Polls three fds forever: the SIGTERM selection object
 *                 (-> daemon_exit), the AMF selection object
 *                 (-> saAmfDispatch) and the GLND mailbox
 *                 (-> glnd_process_mbx). The GLND control block handle
 *                 is taken only for the duration of each use and given
 *                 up again (m_GLND_TAKE/GIVEUP_GLND_CB discipline).
 *
 * Arguments     : mbx  - This is the mail box pointer on which GLND is
 *                        going to block.
 *
 * Return Values : None.
 *
 * Notes         : Returns (rather than exiting) on poll error flags or
 *                 when the control block can no longer be taken.
 *****************************************************************************/
void glnd_main_process(SYSF_MBX *mbx)
{
    NCS_SEL_OBJ mbx_fd = m_NCS_IPC_GET_SEL_OBJ(mbx);
    GLND_CB *glnd_cb = NULL;
    TRACE_ENTER();
    SaAmfHandleT amf_hdl;
    SaSelectionObjectT amf_sel_obj;
    SaAisErrorT amf_error;
    struct pollfd sel[NUM_FD];
    int term_fd;

    /* take the handle */
    glnd_cb = (GLND_CB *)m_GLND_TAKE_GLND_CB;
    if (!glnd_cb) {
        LOG_ER("GLND cb take handle failed");
        goto end;
    }

    /* Cache the AMF handle; it is stable, so we can give the CB back
     * immediately instead of holding it across the poll loop. */
    amf_hdl = glnd_cb->amf_hdl;

    /*giveup the handle */
    m_GLND_GIVEUP_GLND_CB;

    amf_error = saAmfSelectionObjectGet(amf_hdl, &amf_sel_obj);
    if (amf_error != SA_AIS_OK) {
        LOG_ER("GLND amf get sel obj error");
        goto end;
    }

    daemon_sigterm_install(&term_fd);

    sel[FD_TERM].fd = term_fd;
    sel[FD_TERM].events = POLLIN;
    sel[FD_AMF].fd = amf_sel_obj;
    sel[FD_AMF].events = POLLIN;
    sel[FD_MBX].fd = m_GET_FD_FROM_SEL_OBJ(mbx_fd);
    sel[FD_MBX].events = POLLIN;

    while (osaf_poll(&sel[0], NUM_FD, -1) > 0) {
        if (sel[FD_TERM].revents & POLLIN) {
            daemon_exit();
        }

        /* Any error condition on the AMF or mailbox fd is fatal for
         * this loop: log both revents and bail out. */
        if (((sel[FD_AMF].revents | sel[FD_MBX].revents) &
             (POLLERR | POLLHUP | POLLNVAL)) != 0) {
            LOG_ER("GLND poll() failure: %hd %hd",
                   sel[FD_AMF].revents, sel[FD_MBX].revents);
            TRACE_LEAVE();
            return;
        }

        /* process all the AMF messages */
        if (sel[FD_AMF].revents & POLLIN) {
            /* dispatch all the AMF pending function */
            amf_error = saAmfDispatch(amf_hdl, SA_DISPATCH_ALL);
            if (amf_error != SA_AIS_OK) {
                /* dispatch failure is logged but not fatal here */
                TRACE_2("GLND amf dispatch failure");
            }
        }
        /* process the GLND Mail box */
        if (sel[FD_MBX].revents & POLLIN) {
            glnd_cb = (GLND_CB *)m_GLND_TAKE_GLND_CB;
            if (glnd_cb) {
                /* now got the IPC mail box event */
                glnd_process_mbx(glnd_cb, mbx);
                m_GLND_GIVEUP_GLND_CB;	/* giveup the handle */
            }
            else
                /* CB vanished: leave the loop */
                break;
        }
    }

    TRACE("DANGER: Exiting the Select loop of GLND");

end:
    TRACE_LEAVE();
    return;
}
/*
 * RDE daemon entry point.
 *
 * Initializes RDE, discovers its peer and determines the initial HA
 * role, then polls: SIGTERM object, AMF (or the SIGUSR1 bootstrap
 * object under NID), the mailbox, the RDA server socket, and a dynamic
 * tail of accepted RDA client fds starting at FD_CLIENT_START. nfds
 * grows as clients connect; when a client disconnects the fd array tail
 * is rebuilt from rde_rda_cb->clients and the scan restarts. Falls
 * through to init_failed/done on any fatal error and exits non-zero.
 */
int main(int argc, char *argv[])
{
    uint32_t rc;
    nfds_t nfds = 4;   /* fixed fds; client fds are appended after these */
    struct pollfd fds[nfds + RDA_MAX_CLIENTS];
    int i, ret;
    NCS_SEL_OBJ mbx_sel_obj;
    RDE_RDA_CB *rde_rda_cb = &rde_cb->rde_rda_cb;
    int term_fd;

    daemonize(argc, argv);

    if (initialize_rde() != NCSCC_RC_SUCCESS)
        goto init_failed;

    mbx_sel_obj = ncs_ipc_get_sel_obj(&rde_cb->mbx);

    if ((rc = discover_peer(mbx_sel_obj.rmv_obj)) == NCSCC_RC_FAILURE)
        goto init_failed;

    if ((rc = determine_role(mbx_sel_obj.rmv_obj)) == NCSCC_RC_FAILURE)
        goto init_failed;

    /* If AMF started register immediately */
    if (!rde_cb->rde_amf_cb.nid_started &&
        (rc = rde_amf_init(&rde_cb->rde_amf_cb)) != NCSCC_RC_SUCCESS) {
        goto init_failed;
    }

    /* Under NID, report our init result back to the node init daemon */
    if (rde_cb->rde_amf_cb.nid_started &&
        nid_notify("RDE", rc, NULL) != NCSCC_RC_SUCCESS) {
        LOG_ER("nid_notify failed");
        goto done;
    }

    daemon_sigterm_install(&term_fd);

    fds[FD_TERM].fd = term_fd;
    fds[FD_TERM].events = POLLIN;

    /* USR1/AMF fd */
    fds[FD_AMF].fd = rde_cb->rde_amf_cb.nid_started ?
        usr1_sel_obj.rmv_obj : rde_cb->rde_amf_cb.amf_fd;
    fds[FD_AMF].events = POLLIN;

    /* Mailbox */
    fds[FD_MBX].fd = mbx_sel_obj.rmv_obj;
    fds[FD_MBX].events = POLLIN;

    /* RDA server socket */
    fds[FD_RDA_SERVER].fd = rde_cb->rde_rda_cb.fd;
    fds[FD_RDA_SERVER].events = POLLIN;

    while (1) {
        ret = poll(fds, nfds, -1);

        if (ret == -1) {
            if (errno == EINTR)
                continue;

            LOG_ER("poll failed - %s", strerror(errno));
            break;
        }

        if (fds[FD_TERM].revents & POLLIN) {
            daemon_exit();
        }

        if (fds[FD_AMF].revents & POLLIN) {
            if (rde_cb->rde_amf_cb.amf_hdl != 0) {
                SaAisErrorT error;
                TRACE("AMF event rec");
                if ((error = saAmfDispatch(rde_cb->rde_amf_cb.amf_hdl, SA_DISPATCH_ALL)) != SA_AIS_OK) {
                    LOG_ER("saAmfDispatch failed: %u", error);
                    goto done;
                }
            } else {
                /* SIGUSR1 from NID: switch FD_AMF over to the real AMF fd */
                TRACE("SIGUSR1 event rec");
                ncs_sel_obj_destroy(&usr1_sel_obj);

                if (rde_amf_init(&rde_cb->rde_amf_cb) != NCSCC_RC_SUCCESS)
                    goto done;

                fds[FD_AMF].fd = rde_cb->rde_amf_cb.amf_fd;
            }
        }

        if (fds[FD_MBX].revents & POLLIN)
            handle_mbx_event();

        if (fds[FD_RDA_SERVER].revents & POLLIN) {
            int newsockfd;

            newsockfd = accept(rde_rda_cb->fd, (struct sockaddr *)NULL, NULL);
            if (newsockfd < 0) {
                LOG_ER("accept FAILED %s", strerror(errno));
                goto done;
            }

            /* Add the new client fd to client-list */
            rde_rda_cb->clients[rde_rda_cb->client_count].is_async = false;
            rde_rda_cb->clients[rde_rda_cb->client_count].fd = newsockfd;
            rde_rda_cb->client_count++;

            /* Update poll fd selection */
            fds[nfds].fd = newsockfd;
            fds[nfds].events = POLLIN;
            nfds++;

            TRACE("accepted new client, fd=%d, idx=%d, nfds=%lu", newsockfd,
                  rde_rda_cb->client_count, nfds);
        }

        /* Scan the dynamic client tail for incoming messages */
        for (i = FD_CLIENT_START; i < nfds; i++) {
            if (fds[i].revents & POLLIN) {
                int client_disconnected = 0;
                TRACE("received msg on fd %u", fds[i].fd);
                rde_rda_client_process_msg(rde_rda_cb, fd_to_client_ixd(fds[i].fd), &client_disconnected);
                if (client_disconnected) {
                    /* reinitialize the fd array & nfds */
                    /* (reuses i; the break right after makes that safe) */
                    nfds = FD_CLIENT_START;
                    for (i = 0; i < rde_rda_cb->client_count; i++, nfds++) {
                        fds[i + FD_CLIENT_START].fd = rde_rda_cb->clients[i].fd;
                        fds[i + FD_CLIENT_START].events = POLLIN;
                    }
                    TRACE("client disconnected, fd array reinitialized, nfds=%lu", nfds);
                    break;
                }
            }
        }
    }

init_failed:
    if (rde_cb->rde_amf_cb.nid_started &&
        nid_notify("RDE", NCSCC_RC_FAILURE, NULL) != NCSCC_RC_SUCCESS) {
        LOG_ER("nid_notify failed");
        rc = NCSCC_RC_FAILURE;
    }

done:
    syslog(LOG_ERR, "Exiting...");
    exit(1);
}
/****************************************************************************
 * Name          : eds_main_process
 *
 * Description   : This is the function which is given as a input to the
 *                 EDS task.
 *                 This function will be select of both the FD's (AMF FD and
 *                 Mail Box FD), depending on which FD has been selected, it
 *                 will call the corresponding routines.
 *                 Dispatches: SIGTERM (-> daemon_exit), AMF, MBCSv,
 *                 mailbox, CLM and IMM OI events. On an IMM
 *                 BAD_HANDLE the OI handle is finalized/invalidated and
 *                 re-initialization is kicked off in a background
 *                 thread; other IMM dispatch errors end the loop.
 *
 * Arguments     : mbx  - This is the mail box pointer on which EDS is
 *                        going to block.
 *
 * Return Values : None.
 *
 * Notes         : None.
 *****************************************************************************/
void eds_main_process(SYSF_MBX *mbx)
{
    NCS_SEL_OBJ mbx_fd;
    SaAisErrorT error = SA_AIS_OK;
    EDS_CB *eds_cb = NULL;
    int term_fd;

    TRACE_ENTER();

    if (NULL == (eds_cb = (EDS_CB *)ncshm_take_hdl(NCS_SERVICE_ID_EDS, gl_eds_hdl))) {
        LOG_ER("Global take handle failed");
        return;
    }

    mbx_fd = m_NCS_IPC_GET_SEL_OBJ(&eds_cb->mbx);

    /* Give back the handle */
    ncshm_give_hdl(gl_eds_hdl);

    /* Initialize with IMM */
    if (eds_imm_init(eds_cb) != SA_AIS_OK) {
        LOG_ER("Imm Init Failed. Exiting");
        exit(EXIT_FAILURE);
    }

    daemon_sigterm_install(&term_fd);

    /* Set up all file descriptors to listen to */
    fds[FD_TERM].fd = term_fd;
    fds[FD_TERM].events = POLLIN;
    fds[FD_AMF].fd = eds_cb->amfSelectionObject;
    fds[FD_AMF].events = POLLIN;
    fds[FD_CLM].fd = eds_cb->clm_sel_obj;
    fds[FD_CLM].events = POLLIN;
    fds[FD_MBCSV].fd = eds_cb->mbcsv_sel_obj;
    fds[FD_MBCSV].events = POLLIN;
    fds[FD_MBX].fd = mbx_fd.rmv_obj;
    fds[FD_MBX].events = POLLIN;
    fds[FD_IMM].fd = eds_cb->imm_sel_obj;
    fds[FD_IMM].events = POLLIN;

    TRACE("Entering the forever loop");

    while (1) {
        /* Only poll FD_IMM while we hold a valid OI handle with the
         * implementer set; otherwise shrink nfds to skip it. */
        if ((eds_cb->immOiHandle != 0) && (eds_cb->is_impl_set == true)){
            fds[FD_IMM].fd = eds_cb->imm_sel_obj;
            fds[FD_IMM].events = POLLIN;
            nfds = NUM_FD;
        } else {
            nfds = NUM_FD - 1;
        }

        int ret = poll(fds, nfds, -1);

        if (ret == -1) {
            if (errno == EINTR)
                continue;

            TRACE("poll failed - %s", strerror(errno));
            break;
        }
        /* process the sigterm */
        if (fds[FD_TERM].revents & POLLIN) {
            daemon_exit();
        }
        /* process all the AMF messages */
        if (fds[FD_AMF].revents & POLLIN) {
            /* dispatch all the AMF pending callbacks */
            error = saAmfDispatch(eds_cb->amf_hdl, SA_DISPATCH_ALL);
            if (error != SA_AIS_OK)
                LOG_ER("AMF Dispatch failed with rc = %d",error);
        }
        /* process all mbcsv messages */
        if (fds[FD_MBCSV].revents & POLLIN) {
            error = eds_mbcsv_dispatch(eds_cb->mbcsv_hdl);
            if (NCSCC_RC_SUCCESS != error)
                LOG_ER("MBCSv Dispatch failed with rc = %d",error);
        }
        /* Process the EDS Mail box, if eds is ACTIVE. */
        if (fds[FD_MBX].revents & POLLIN) {
            /* now got the IPC mail box event */
            eds_process_mbx(mbx);
        }
        /* process the CLM messages */
        if (fds[FD_CLM].revents & POLLIN) {
            /* dispatch all the AMF pending callbacks */
            error = saClmDispatch(eds_cb->clm_hdl, SA_DISPATCH_ALL);
            if (error != SA_AIS_OK)
                LOG_ER("CLM Dispatch failed with rc = %d",error);
        }
        /* process the IMM messages */
        if (eds_cb->immOiHandle && fds[FD_IMM].revents & POLLIN) {
            /* dispatch the IMM event */
            error = saImmOiDispatch(eds_cb->immOiHandle, SA_DISPATCH_ONE);

            /*
             ** BAD_HANDLE is interpreted as an IMM service restart. Try
             ** reinitialize the IMM OI API in a background thread and let
             ** this thread do business as usual especially handling write
             ** requests.
             **
             ** All other errors are treated as non-recoverable (fatal) and will
             ** cause an exit of the process.
             */
            if (error == SA_AIS_ERR_BAD_HANDLE) {
                TRACE("saImmOiDispatch returned BAD_HANDLE");

                /* Invalidate the IMM OI handle. */
                saImmOiFinalize(eds_cb->immOiHandle);
                eds_cb->immOiHandle = 0;
                eds_cb->is_impl_set = false;
                eds_imm_reinit_bg(eds_cb);
            } else if (error != SA_AIS_OK) {
                LOG_ER("saImmOiDispatch FAILED with rc = %d", error);
                break;
            }
        }
    }

    TRACE_LEAVE();
    return;
}	/* End eds_main_process() */