void
ssl_connect(int s, short event, void *arg)
{
    struct ctl_tcp_event *cte = arg;
    int retry_flag = 0;
    int tls_err = 0;
    int ret;

    if (event == EV_TIMEOUT) {
        cte->host->up = HOST_DOWN;
        hce_notify_done(cte->host, HCE_TLS_CONNECT_TIMEOUT);
        ssl_cleanup(cte);
        return;
    }

    ret = SSL_connect(cte->ssl);
    if (ret <= 0) {
        tls_err = SSL_get_error(cte->ssl, ret);
        switch (tls_err) {
        case SSL_ERROR_WANT_READ:
            retry_flag = EV_READ;
            goto retry;
        case SSL_ERROR_WANT_WRITE:
            retry_flag = EV_WRITE;
            goto retry;
        default:
            cte->host->up = HOST_DOWN;
            ssl_error(cte->host->conf.name, "cannot connect");
            hce_notify_done(cte->host, HCE_TLS_CONNECT_FAIL);
            ssl_cleanup(cte);
            return;
        }
    }

    if (cte->table->conf.check == CHECK_TCP) {
        cte->host->up = HOST_UP;
        hce_notify_done(cte->host, HCE_TLS_CONNECT_OK);
        ssl_cleanup(cte);
        return;
    }
    if (cte->table->sendbuf != NULL) {
        event_again(&cte->ev, cte->s, EV_TIMEOUT|EV_WRITE, ssl_write,
            &cte->tv_start, &cte->table->conf.timeout, cte);
        return;
    }
    if ((cte->buf = ibuf_dynamic(SMALL_READ_BUF_SIZE, UINT_MAX)) == NULL)
        fatalx("ssl_connect: cannot create dynamic buffer");
    event_again(&cte->ev, cte->s, EV_TIMEOUT|EV_READ, ssl_read,
        &cte->tv_start, &cte->table->conf.timeout, cte);
    return;

retry:
    event_again(&cte->ev, s, EV_TIMEOUT|retry_flag, ssl_connect,
        &cte->tv_start, &cte->table->conf.timeout, cte);
}
static char *
run_all_tests()
{
    printf("Running test_new_success()...\n");
    mu_run_test(test_new_success);
    printf("Done!\nRunning test_new_fail1()...\n");
    mu_run_test(test_new_fail1);
    mu_run_test(test_new_fail0);
    ssl_cleanup();
    return NULL;
}
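/*
 * Hedged sketch (not part of the sources above): run_all_tests() relies on a
 * mu_run_test() macro defined elsewhere in the test harness. The classic
 * MinUnit-style macros look roughly like the following; the project's actual
 * header may differ, so treat mu_assert, mu_run_test, and tests_run as
 * assumptions used only for illustration.
 */
#define mu_assert(message, test) do { if (!(test)) return message; } while (0)
#define mu_run_test(test) do { char *message = test(); tests_run++; \
                               if (message) return message; } while (0)
extern int tests_run;   /* incremented once per executed test case */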
int main(int argc, char **argv)
{
    SSL *conn;
    char *keytab = "tmp.keytab";
    char *servername = "rekey.andrew.cmu.edu";
    char *princname = REKEY_DEF_SERVICE;
    int optch;
    int flag = 0;

    while ((optch = getopt(argc, argv, "k:s:P:d")) != -1) {
        switch (optch) {
        case 'k':
            keytab = optarg;
            break;
        case 's':
            servername = optarg;
            break;
        case 'P':
            princname = optarg;
            break;
        case 'd':
            flag |= REQFLAG_DESONLY;
            break;
        case '?':
            fprintf(stderr,
                "Usage: rekeytest [-k keytab] [-r realm] [-s servername] [-P serverprinc]\n"
                "  [-d] [princ [hostname...]]\n");
            exit(1);
        }
    }

    ssl_startup();
    conn = c_connect(servername);
    printf("Attach to remote server if required, then press return\n");
    getc(stdin);
    c_auth(conn, servername, princname);

    if (argc > 2)
        c_newreq(conn, argv[1], flag, argc - 2, argv + 2);
    else if (argc == 2)
        c_status(conn, argv[1]);
    else
        c_getkeys(conn, keytab, 0, NULL, 0);

    SSL_shutdown(conn);
    SSL_free(conn);
    ssl_cleanup();
    return 0;
}
void fatal(const char *s, int recoverable)
{
  int i;

  putlog(LOG_MISC, "*", "* %s", s);
  flushlogs();
  for (i = 0; i < dcc_total; i++)
    if (dcc[i].sock >= 0)
      killsock(dcc[i].sock);
#ifdef TLS
  ssl_cleanup();
#endif
  unlink(pid_file);
  if (recoverable != 1) {
    bg_send_quit(BG_ABORT);
    exit(!recoverable);
  }
}
void
ssl_transaction(struct ctl_tcp_event *cte)
{
    if (cte->ssl == NULL) {
        cte->ssl = SSL_new(cte->table->ssl_ctx);
        if (cte->ssl == NULL) {
            ssl_error(cte->host->conf.name, "cannot create object");
            fatal("cannot create SSL object");
        }
    }

    if (SSL_set_fd(cte->ssl, cte->s) == 0) {
        cte->host->up = HOST_UNKNOWN;
        ssl_error(cte->host->conf.name, "cannot set fd");
        ssl_cleanup(cte);
        hce_notify_done(cte->host, HCE_TLS_CONNECT_ERROR);
        return;
    }
    SSL_set_connect_state(cte->ssl);

    event_again(&cte->ev, cte->s, EV_TIMEOUT|EV_WRITE, ssl_connect,
        &cte->tv_start, &cte->table->conf.timeout, cte);
}
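/*
 * Hedged sketch (illustrative, not part of the sources above): the same
 * OpenSSL calls ssl_transaction() makes, shown standalone for an
 * already-connected, non-blocking socket `fd`. Assumes the caller supplies an
 * SSL_CTX (relayd takes it from cte->table->ssl_ctx); the helper name
 * tls_client_attach is hypothetical.
 */
#include <openssl/ssl.h>

static SSL *
tls_client_attach(SSL_CTX *ctx, int fd)
{
    SSL *ssl;

    if ((ssl = SSL_new(ctx)) == NULL)          /* cannot create object */
        return NULL;
    if (SSL_set_fd(ssl, fd) == 0) {            /* bind the session to fd */
        SSL_free(ssl);
        return NULL;
    }
    SSL_set_connect_state(ssl);                /* act as the TLS client */
    /* The handshake itself is then driven by SSL_connect() retries from
     * the event loop, as ssl_connect() above does. */
    return ssl;
}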
void
ssl_read(int s, short event, void *arg)
{
    char rbuf[SMALL_READ_BUF_SIZE];
    struct ctl_tcp_event *cte = arg;
    int retry_flag = EV_READ;
    int tls_err = 0;
    int ret;

    if (event == EV_TIMEOUT) {
        cte->host->up = HOST_DOWN;
        ssl_cleanup(cte);
        hce_notify_done(cte->host, HCE_TLS_READ_TIMEOUT);
        return;
    }
    bzero(rbuf, sizeof(rbuf));

    ret = SSL_read(cte->ssl, rbuf, sizeof(rbuf));
    if (ret <= 0) {
        tls_err = SSL_get_error(cte->ssl, ret);
        switch (tls_err) {
        case SSL_ERROR_WANT_READ:
            retry_flag = EV_READ;
            goto retry;
        case SSL_ERROR_WANT_WRITE:
            retry_flag = EV_WRITE;
            goto retry;
        case SSL_ERROR_ZERO_RETURN:
            /* FALLTHROUGH */
        case SSL_ERROR_SYSCALL:
            if (ret == 0) {
                cte->host->up = HOST_DOWN;
                (void)cte->validate_close(cte);
                ssl_cleanup(cte);
                hce_notify_done(cte->host, cte->host->he);
                return;
            }
            /* FALLTHROUGH */
        default:
            cte->host->up = HOST_DOWN;
            ssl_error(cte->host->conf.name, "cannot read");
            ssl_cleanup(cte);
            hce_notify_done(cte->host, HCE_TLS_READ_ERROR);
            break;
        }
        return;
    }

    if (ibuf_add(cte->buf, rbuf, ret) == -1)
        fatal("ssl_read: buf_add error");

    if (cte->validate_read != NULL) {
        if (cte->validate_read(cte) != 0)
            goto retry;
        ssl_cleanup(cte);
        hce_notify_done(cte->host, cte->host->he);
        return;
    }

retry:
    event_again(&cte->ev, s, EV_TIMEOUT|retry_flag, ssl_read,
        &cte->tv_start, &cte->table->conf.timeout, cte);
    return;
}
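/*
 * Hedged sketch (illustrative only): ssl_connect() and ssl_read() above both
 * re-arm their libevent callback based on SSL_get_error(). That mapping can
 * be factored out as below; the helper name tls_retry_events is hypothetical,
 * EV_READ/EV_WRITE are libevent flags, and a return of 0 means "fatal error
 * or clean shutdown, do not retry".
 */
#include <openssl/ssl.h>
#include <event.h>

static short
tls_retry_events(SSL *ssl, int ret)
{
    switch (SSL_get_error(ssl, ret)) {
    case SSL_ERROR_WANT_READ:
        return EV_READ;         /* retry once the socket is readable */
    case SSL_ERROR_WANT_WRITE:
        return EV_WRITE;        /* retry once the socket is writable */
    default:
        return 0;               /* error or zero return: give up */
    }
}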
int net_deploy (void)
{
  if (((int) net_opts.thread_l + (int) net_opts.thread_r) > net_opts.max_worker_threads) {
    print_str ("ERROR: net_deploy: requested thread count exceeds 'max_worker_threads' [%d/%d]\n",
        ((int) net_opts.thread_l + (int) net_opts.thread_r), net_opts.max_worker_threads);
    return -1;
  }

  //_m_tid = getpid();

  /*#ifdef M_ARENA_TEST
   mallopt (M_ARENA_TEST, 1);
   #endif*/

#ifdef M_ARENA_MAX
  mallopt (M_ARENA_MAX, 1);
#endif

  struct sigaction sa_wthrd, sa_mthrd;

  sa_wthrd.sa_handler = sig_handler_null;
  sa_wthrd.sa_flags = SA_RESTART;
  sigfillset (&sa_wthrd.sa_mask);
  //sigaddset(&sa_wthrd.sa_mask, SIGIO);
  //sigaddset(&sa_wthrd.sa_mask, SIGUSR1);

  sigaction (SIGIO, &sa_wthrd, NULL);
  sigaction (SIGURG, &sa_wthrd, NULL);
  sigaction (SIGUSR1, &sa_wthrd, NULL);

  sa_mthrd.sa_handler = net_def_sig_handler;
  sa_mthrd.sa_flags = SA_RESTART;
  sigfillset (&sa_mthrd.sa_mask);
  //sigaddset(&sa_mthrd.sa_mask, SIGUSR2);

  sigaction (SIGUSR2, &sa_mthrd, NULL);

  sigset_t set;
  sigemptyset (&set);
  sigaddset (&set, SIGPIPE);
  sigaddset (&set, SIGURG);
  sigaddset (&set, SIGIO);
  sigaddset (&set, SIGUSR1);
  sigaddset (&set, SIGHUP);

  int sr = pthread_sigmask (SIG_BLOCK, &set, NULL);

  uint32_t in_f = 0;

  if (sr != 0) {
    print_str ("ERROR: net_deploy: pthread_sigmask failed: %d\n", sr);
    abort ();
  }

  md_init_le (&_sock_r, (int) net_opts.max_sock);
  md_init_le (&_net_thrd_r, (int) net_opts.max_worker_threads);
  //md_init_le (&_thrd_r_common, 32);
  md_init_le (&tasks_in, MAX_TASKS_GLOBAL);
  md_init_le (&fs_jobs, 1024);

  if (net_opts.flags & F_NETOPT_SSLINIT) {
    print_str ("DEBUG: initializing TLS/SSL subsystem..\n");
    ssl_init ();
    in_f |= F_ND_SSL_INIT;
  }

  int r;
  po_thrd task_worker_object;

  if ((r = thread_create (task_worker, 0, NULL, 0, 0, F_THRD_NOWPID | F_THRD_NOREG,
      F_THC_SKIP_IO, NULL, &task_worker_object, NULL))) {
    return r;
  }

  if ((r = spawn_threads (net_opts.thread_l, net_worker, 0, &_net_thrd_r,
      THREAD_ROLE_NET_WORKER, SOCKET_OPMODE_LISTENER, 0))) {
    print_str ("ERROR: spawn_threads failed [SOCKET_OPMODE_LISTENER]: %d\n", r);
    return 2;
  } else {
    print_str ("DEBUG: deployed %hu socket worker threads [SOCKET_OPMODE_LISTENER]\n",
        net_opts.thread_l);
  }

  if ((r = spawn_threads (net_opts.thread_r, net_worker, 0, &_net_thrd_r,
      THREAD_ROLE_NET_WORKER, SOCKET_OPMODE_RECIEVER, 0))) {
    print_str ("ERROR: spawn_threads failed [SOCKET_OPMODE_RECIEVER]: %d\n", r);
    return 2;
  } else {
    print_str ("DEBUG: deployed %hu socket worker threads [SOCKET_OPMODE_RECIEVER]\n",
        net_opts.thread_r);
  }

  print_str ("DEBUG: waiting for workers to initialize..\n");

  if (net_deploy_wait_for_all_threads (&_net_thrd_r)) {
    print_str ("D5: all workers online\n");
  } else {
    goto _t_kill;
  }

  int fail;

  if ((fail = process_ca_requests (&_boot_pca, F_OPSOCK_LISTEN))) {
    if ((off_t) fail == _boot_pca.offset) {
      print_str ("WARNING: process_ca_requests: no socket requests succeeded\n");
      goto _t_kill;
    } else {
      print_str ("WARNING: process_ca_requests: not all socket requests were successful\n");
    }
  } else {
    print_str ("DEBUG: deployed %llu listener socket(s)\n", (uint64_t) _boot_pca.offset);
  }

  if (fs_jobs.offset) {
    __gfs job = (__gfs) fs_jobs.first->ptr;

    if (job->id < 1 || job->id > USHRT_MAX) {
      print_str ("ERROR: invalid job id\n");
      goto _ts_kill;
    }

    if (NULL == job->link || !strlen (job->link)) {
      print_str ("ERROR: job [%d] has no path\n", job->id);
      goto _ts_kill;
    }

    int ret;

    if (!(ret = fs_link_socks_to_job (job, &_sock_r))) {
      print_str ("ERROR: job [%d] defined but no sockets link to it\n", job->id);
      goto _ts_kill;
    }

    //job->status |= FS_GFS_JOB_LOPEN;

    print_str ("DEBUG: %d sockets linked to job [%d]\n", ret, job->id);

    if ((r = spawn_threads (1, fs_worker, 0, &_net_thrd_r, THREAD_ROLE_FS_WORKER, 0, 0))) {
      print_str ("ERROR: spawn_threads failed [SOCKET_OPMODE_LISTENER]: %d\n", r);
      return 2;
    }

    if (!net_deploy_wait_for_all_threads (&_net_thrd_r)) {
      print_str ("ERROR: could not spawn fs_worker thread\n");
      goto _t_kill;
    }
  }

  if (net_opts.flags & F_NETOPT_CHROOT) {
    if (chroot (net_opts.chroot) == -1) {
      char err_buf[1024];
      print_str ("ERROR: netctl_opt_parse: '%s': chroot failed: [%d] [%s]\n",
          net_opts.chroot, errno, strerror_r (errno, err_buf, sizeof(err_buf)));
      goto _ts_kill;
    }

    print_str ("NOTICE: chrooted %s\n", net_opts.chroot);
  }

  if (net_opts.flags & F_NETOPT_GID) {
    if (setgid (net_opts.gid) == -1) {
      char e_buffer[1024];
      print_str ("ERROR: setgid failed: %s\n",
          strerror_r (errno, e_buffer, sizeof(e_buffer)));
      goto _ts_kill;
    } else {
      print_str ("DEBUG: setgid: %u\n", (unsigned int) net_opts.gid);
    }
  }

  if (net_opts.flags & F_NETOPT_UID) {
    if (setuid (net_opts.uid) == -1) {
      char e_buffer[1024];
      print_str ("ERROR: setuid failed: %s\n",
          strerror_r (errno, e_buffer, sizeof(e_buffer)));
      goto _ts_kill;
    } else {
      print_str ("DEBUG: setuid: %u\n", (unsigned int) net_opts.uid);
    }
  }

  if (net_opts.flags & (F_NETOPT_HUSER | F_NETOPT_HGROUP)) {
    if (net_opts.flags & F_NETOPT_HUSER) {
      snprintf (G_USER, sizeof(G_USER), "%s", net_opts.user);
      gfl0 |= F_OPT_SETUID;
    }

    if (net_opts.flags & F_NETOPT_HGROUP) {
      snprintf (G_GROUP, sizeof(G_GROUP), "%s", net_opts.group);
      gfl0 |= F_OPT_SETGID;
    }

    g_setxid ();
  }

  //htest();

  if (NULL != ftp_cmd) {
    net_ftp_init (ftp_cmd);
  }

  net_pop_rc (NULL, &net_post_init_rc);

  thread_broadcast_sig (&_net_thrd_r, SIGINT);

  if ((fail = process_ca_requests (&_boot_pca, F_OPSOCK_CONNECT))) {
    if ((off_t) fail == _boot_pca.offset) {
      print_str ("WARNING: process_ca_requests: no socket requests succeeded\n");
      goto _t_kill;
    } else {
      print_str ("WARNING: process_ca_requests: not all socket requests were successful\n");
    }
  }

  while (g_get_gkill ()) {
    //net_ping_threads();
    sleep (-1);
  }

  _ts_kill: ;

  if (register_count (&_sock_r)) {
    print_str ("DEBUG: sending F_OPSOCK_TERM to all sockets\n");
    net_nw_ssig_term_r (&_sock_r);
  }

  _t_kill: ;

  print_str ("DEBUG: sending F_THRD_TERM to all worker threads\n");
  thread_broadcast_kill (&_net_thrd_r);

  print_str ("DEBUG: waiting for threads to exit..\n");

  if ((r = thread_join_threads (&_net_thrd_r))) {
    print_str ("WARNING: %d threads remaining\n", r);
  }

  thread_send_kill (task_worker_object);
  pthread_join (task_worker_object->pt, NULL);

  md_g_free_l (&_net_thrd_r);
  md_g_free_l (&_sock_r);

  free (pc_a.objects);
  md_g_free_l (&_boot_pca);

  md_g_free_l (&tasks_in);
  md_g_free_l (&fs_jobs);

  if (in_f & F_ND_SSL_INIT) {
    print_str ("DEBUG: releasing TLS/SSL resources..\n");
    ssl_cleanup ();
  }

  print_str ("INFO: server shutting down..\n");

  return 0;
}
int mainloop(int toplevel)
{
  static int socket_cleanup = 0;
  int xx, i, eggbusy = 1, tclbusy = 0;
  char buf[520];

  /* Let's move some of this here, reducing the number of actual
   * calls to periodic_timers */
  now = time(NULL);
  /*
   * FIXME: Get rid of this, it's ugly and wastes lots of cpu.
   *
   * pre-1.3.0 Eggdrop had random() in the once a second block below.
   *
   * This attempts to keep random() more random by constantly
   * calling random() and updating the state information.
   */
  random(); /* Woop, lets really jumble things */

  /* If we want to restart, we have to unwind to the toplevel.
   * Tcl will Panic if we kill the interp with Tcl_Eval in progress.
   * This is done by returning -1 in tickle_WaitForEvent. */
  if (do_restart && do_restart != -2 && !toplevel)
    return -1;

  /* Once a second */
  if (now != then) {
    call_hook(HOOK_SECONDLY);
    then = now;
  }

  /* Only do this every so often. */
  if (!socket_cleanup) {
    socket_cleanup = 5;

    /* Remove dead dcc entries. */
    dcc_remove_lost();

    /* Check for server or dcc activity. */
    dequeue_sockets();
  } else
    socket_cleanup--;

  /* Free unused structures. */
  garbage_collect();

  xx = sockgets(buf, &i);
  if (xx >= 0) {                /* Non-error */
    int idx;

    for (idx = 0; idx < dcc_total; idx++)
      if (dcc[idx].sock == xx) {
        if (dcc[idx].type && dcc[idx].type->activity) {
          /* Traffic stats */
          if (dcc[idx].type->name) {
            if (!strncmp(dcc[idx].type->name, "BOT", 3))
              itraffic_bn_today += strlen(buf) + 1;
            else if (!strcmp(dcc[idx].type->name, "SERVER"))
              itraffic_irc_today += strlen(buf) + 1;
            else if (!strncmp(dcc[idx].type->name, "CHAT", 4))
              itraffic_dcc_today += strlen(buf) + 1;
            else if (!strncmp(dcc[idx].type->name, "FILES", 5))
              itraffic_dcc_today += strlen(buf) + 1;
            else if (!strcmp(dcc[idx].type->name, "SEND"))
              itraffic_trans_today += strlen(buf) + 1;
            else if (!strncmp(dcc[idx].type->name, "GET", 3))
              itraffic_trans_today += strlen(buf) + 1;
            else
              itraffic_unknown_today += strlen(buf) + 1;
          }
          dcc[idx].type->activity(idx, buf, i);
        } else
          putlog(LOG_MISC, "*",
                 "!!! untrapped dcc activity: type %s, sock %d",
                 dcc[idx].type->name, dcc[idx].sock);
        break;
      }
  } else if (xx == -1) {        /* EOF from someone */
    int idx;

    if (i == STDOUT && !backgrd)
      fatal("END OF FILE ON TERMINAL", 0);
    for (idx = 0; idx < dcc_total; idx++)
      if (dcc[idx].sock == i) {
        if (dcc[idx].type && dcc[idx].type->eof)
          dcc[idx].type->eof(idx);
        else {
          putlog(LOG_MISC, "*",
                 "*** ATTENTION: DEAD SOCKET (%d) OF TYPE %s UNTRAPPED",
                 i, dcc[idx].type ? dcc[idx].type->name : "*UNKNOWN*");
          killsock(i);
          lostdcc(idx);
        }
        idx = dcc_total + 1;
      }
    if (idx == dcc_total) {
      putlog(LOG_MISC, "*",
             "(@) EOF socket %d, not a dcc socket, not anything.", i);
      close(i);
      killsock(i);
    }
  } else if (xx == -2 && errno != EINTR) {      /* select() error */
    putlog(LOG_MISC, "*", "* Socket error #%d; recovering.", errno);
    for (i = 0; i < dcc_total; i++) {
      if ((fcntl(dcc[i].sock, F_GETFD, 0) == -1) && (errno == EBADF)) {
        putlog(LOG_MISC, "*",
               "DCC socket %d (type %d, name '%s') expired -- pfft",
               dcc[i].sock, dcc[i].type, dcc[i].nick);
        killsock(dcc[i].sock);
        lostdcc(i);
        i--;
      }
    }
  } else if (xx == -3) {
    call_hook(HOOK_IDLE);
    socket_cleanup = 0;         /* If we've been idle, cleanup & flush */
    eggbusy = 0;
  } else if (xx == -5) {
    eggbusy = 0;
    tclbusy = 1;
  }

  if (do_restart) {
    if (do_restart == -2)
      rehash();
    else if (!toplevel)
      return -1;                /* Unwind to toplevel before restarting */
    else {
      /* Unload as many modules as possible */
      int f = 1;
      module_entry *p;
      Function startfunc;
      char name[256];

      /* oops, I guess we should call this event before tcl is restarted */
      check_tcl_event("prerestart");

      while (f) {
        f = 0;
        for (p = module_list; p != NULL; p = p->next) {
          dependancy *d = dependancy_list;
          int ok = 1;

          while (ok && d) {
            if (d->needed == p)
              ok = 0;
            d = d->next;
          }
          if (ok) {
            strcpy(name, p->name);
            if (module_unload(name, botnetnick) == NULL) {
              f = 1;
              break;
            }
          }
        }
      }

      /* Make sure we don't have any modules left hanging around other than
       * "eggdrop" and the two that are supposed to be. */
      for (f = 0, p = module_list; p; p = p->next) {
        if (strcmp(p->name, "eggdrop") && strcmp(p->name, "encryption") &&
            strcmp(p->name, "uptime")) {
          f++;
        }
      }
      if (f != 0) {
        putlog(LOG_MISC, "*", MOD_STAGNANT);
      }

      flushlogs();
      kill_tcl();
      init_tcl(argc, argv);
      init_language(0);

      /* this resets our modules which we didn't unload (encryption and uptime) */
      for (p = module_list; p; p = p->next) {
        if (p->funcs) {
          startfunc = p->funcs[MODCALL_START];
          startfunc(NULL);
        }
      }

      rehash();
#ifdef TLS
      ssl_cleanup();
      ssl_init();
#endif
      restart_chons();
      call_hook(HOOK_LOADED);
    }
    eggbusy = 1;
    do_restart = 0;
  }

#ifdef USE_TCL_EVENTS
  if (!eggbusy) {
/* Process all pending tcl events */
#  ifdef REPLACE_NOTIFIER
    if (Tcl_ServiceAll())
      tclbusy = 1;
#  else
    while (Tcl_DoOneEvent(TCL_DONT_WAIT | TCL_ALL_EVENTS))
      tclbusy = 1;
#  endif /* REPLACE_NOTIFIER */
#endif /* USE_TCL_EVENTS */
  }

  return (eggbusy || tclbusy);
}