/*
 * Receive-thread entry point for the gmond plugin (pthread signature).
 *
 * Opens the multicast receive sockets via create_sockets(), falling back to
 * MC_RECEIVE_GROUP_DEFAULT / MC_RECEIVE_PORT_DEFAULT when no group/port was
 * configured, mirrors every fd into the global mc_receive_sockets pollfd
 * array (POLLIN | POLLPRI), then polls with an infinite timeout while
 * mc_receive_thread_loop is non-zero, passing each socket with pending
 * revents to mc_handle_socket().  poll() interrupted by EINTR is retried;
 * any other poll failure logs and breaks out of the loop.
 *
 * Returns (void *)-1 on setup failure (after closing any fds already opened
 * and resetting the globals) and (void *)0 on normal shutdown.
 *
 * NOTE(review): the global mc_receive_sockets array allocated here is not
 * freed on the normal exit path — presumably released elsewhere; confirm.
 */
static void *mc_receive_thread (void *arg) /* {{{ */ { socket_entry_t *mc_receive_socket_entries; int status; size_t i; mc_receive_socket_entries = NULL; status = create_sockets (&mc_receive_socket_entries, &mc_receive_sockets_num, (mc_receive_group != NULL) ? mc_receive_group : MC_RECEIVE_GROUP_DEFAULT, (mc_receive_port != NULL) ? mc_receive_port : MC_RECEIVE_PORT_DEFAULT, /* listen = */ 1); if (status != 0) { ERROR ("gmond plugin: create_sockets failed."); return ((void *) -1); } mc_receive_sockets = (struct pollfd *) calloc (mc_receive_sockets_num, sizeof (*mc_receive_sockets)); if (mc_receive_sockets == NULL) { ERROR ("gmond plugin: calloc failed."); for (i = 0; i < mc_receive_sockets_num; i++) close (mc_receive_socket_entries[i].fd); free (mc_receive_socket_entries); mc_receive_socket_entries = NULL; mc_receive_sockets_num = 0; return ((void *) -1); } for (i = 0; i < mc_receive_sockets_num; i++) { mc_receive_sockets[i].fd = mc_receive_socket_entries[i].fd; mc_receive_sockets[i].events = POLLIN | POLLPRI; mc_receive_sockets[i].revents = 0; } while (mc_receive_thread_loop != 0) { status = poll (mc_receive_sockets, mc_receive_sockets_num, -1); if (status <= 0) { char errbuf[1024]; if (errno == EINTR) continue; ERROR ("gmond plugin: poll failed: %s", sstrerror (errno, errbuf, sizeof (errbuf))); break; } for (i = 0; i < mc_receive_sockets_num; i++) { if (mc_receive_sockets[i].revents != 0) mc_handle_socket (mc_receive_sockets + i); } } /* while (mc_receive_thread_loop != 0) */ free (mc_receive_socket_entries); return ((void *) 0); } /* }}} void *mc_receive_thread */
bool RtpSession::setup( uint16_t base_port, const char* cname, const char* peer_addr, uint16_t peer_port, uint8_t payload_type, uint32_t rtp_clock ) { if ( rtp_clock == 0 ) { printf("RtpSession: RTPClock is 0.. aborting..\n"); return false; } m_payload_type = payload_type; m_rtp_clock = rtp_clock; m_ssrc = (uint32_t)my_random(); m_base_timestamp = (uint32_t)my_random(); m_base_seq = (uint16_t)my_random(); m_last_timestamp = m_base_timestamp; m_prev_timestamp = m_base_timestamp; m_seq = m_base_seq; if ( ! create_sockets( base_port ) ) { printf("RtpSession: cannot create socket pairs\n"); return false; } if ( ! setup_peer( peer_addr, peer_port ) ) { printf("RtpSession: cannot setup pear host\n"); return false; } set_cname( cname ); return true; }
/*
 * Entry point for the receiver client.
 *
 * argv[1..3] identify the server connection (forwarded verbatim to
 * conn_server/receive_data/disconnect together with the locally chosen
 * receive port); argv[4] names the local log file.  Establishes the
 * connection, receives data, disconnects, and closes all resources.
 *
 * Exits with status 0 on bad usage (message printed), 1 if the log file
 * cannot be opened; returns 0 on success.
 */
int main(int argc, char **argv)
{
    if (argc != 5) {
        printf("Wrong args\n");
        exit(0);
    }
    /* BUG FIX: open() with O_CREAT requires a third (mode) argument;
     * omitting it is undefined behavior and yields unpredictable file
     * permissions for a newly created log file. */
    fd = open(argv[4], O_WRONLY | O_CREAT, 0644);
    if (fd == -1) {
        printf("Error opening log file\n");
        exit(1);
    }

    /* Reset global protocol state before connecting. */
    connected = 0;
    all_get = 0;
    /* NOTE(review): rand() is not seeded here, so the initial sequence
     * number repeats across runs — confirm srand() is called elsewhere. */
    start_seq_num = (unsigned int)rand();
    expected_seq = 0;
    last_seq = 0;
    passive_fin = 0;

    create_sockets();
    ini_send_addr(argv[2]);
    int recv_port = ini_recv_addr();
    conn_server(argv[1], argv[2], argv[3], recv_port);
    printf("Connection established!\n");
    receive_data(argv[1], argv[2], argv[3], recv_port);
    disconnect(argv[1], argv[2], argv[3], recv_port);
    close_sockets();
    close(fd);
    return 0;
}
/* Build a connected client/server endpoint pair backed by a local
   socketpair; read_slice_size is forwarded to both TCP endpoints.
   fds[1] becomes the client side, fds[0] the server side. */
grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(size_t read_slice_size) {
  int fds[2];
  grpc_endpoint_pair pair;

  create_sockets(fds);
  pair.client = grpc_tcp_create(grpc_fd_create(fds[1]), read_slice_size);
  pair.server = grpc_tcp_create(grpc_fd_create(fds[0]), read_slice_size);
  return pair;
}
/** * Initialization based on command line args */ void initialize() { char hostname[256]; struct hostent *hp; struct in_addr *addr; int i; parent = 0; // Load list of multicast interfaces if (interface_count == 0) { for (i = 0; i < ifl_len; i++) { if (!ifl[i].isloopback) { m_interface[interface_count++] = ifl[i]; } } } // No non-loopback interfaces, so just use the hostname's interface if (interface_count == 0) { gethostname(hostname, sizeof(hostname)); if ((hp = gethostbyname(hostname)) == NULL) { fprintf(stderr, "Can't get host name\n"); exit(1); } else { addr = (struct in_addr *)hp->h_addr_list[0]; m_interface[interface_count].addr = *addr; m_interface[interface_count++].ismulti = 1; m_interface[interface_count++].isloopback = 0; } } if (uid) { m_interface[interface_count].addr.s_addr = uid; m_interface[interface_count++].ismulti = 0; m_interface[interface_count++].isloopback = 0; } if (proxy_type == CLIENT_PROXY) { pub_multi_count = 0; } else if (!pub_multi_count) { pub_multi[0].s_addr = inet_addr(DEF_PUB_MULTI); pub_multi_count = 1; } for (i = 0; i < MAXLIST; i++) { group_list[i].group_id = 0; } next_hb_time.tv_sec = 0; next_hb_time.tv_usec = 0; last_key_req.tv_sec = 0; last_key_req.tv_usec = 0; atexit(cleanup); key_init(); create_sockets(); daemonize(); showtime = 1; }
/*
 * (old, callback-list grpc API variant)
 *
 * Creates a socketpair, wraps sv[1] as a grpc endpoint, writes num_bytes of
 * generated data split into slice_size slices, and verifies the bytes by
 * draining sv[0] directly.  If grpc_endpoint_write() completes immediately
 * the socket is drained and the byte count asserted; if it is PENDING the
 * socket is drained blocking while the pollset is pumped until write_done
 * fires; an ERROR result aborts.  Cleans up the slice buffer, endpoint and
 * slice array on exit.
 */
/* Write to a socket using the grpc_tcp API, then drain it directly. Note that if the write does not complete immediately we need to drain the socket in parallel with the read. */ static void write_test(ssize_t num_bytes, ssize_t slice_size) { int sv[2]; grpc_endpoint *ep; struct write_socket_state state; ssize_t read_bytes; size_t num_blocks; gpr_slice *slices; int current_data = 0; gpr_slice_buffer outgoing; grpc_iomgr_closure write_done_closure; gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20); gpr_log(GPR_INFO, "Start write test with %d bytes, slice size %d", num_bytes, slice_size); create_sockets(sv); ep = grpc_tcp_create(grpc_fd_create(sv[1], "write_test"), GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "test"); grpc_endpoint_add_to_pollset(ep, &g_pollset); state.ep = ep; state.write_done = 0; slices = allocate_blocks(num_bytes, slice_size, &num_blocks, &current_data); gpr_slice_buffer_init(&outgoing); gpr_slice_buffer_addn(&outgoing, slices, num_blocks); grpc_iomgr_closure_init(&write_done_closure, write_done, &state); switch (grpc_endpoint_write(ep, &outgoing, &write_done_closure)) { case GRPC_ENDPOINT_DONE: /* Write completed immediately */ read_bytes = drain_socket(sv[0]); GPR_ASSERT(read_bytes == num_bytes); break; case GRPC_ENDPOINT_PENDING: drain_socket_blocking(sv[0], num_bytes, num_bytes); gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); for (;;) { grpc_pollset_worker worker; if (state.write_done) { break; } grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC), deadline); } gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); break; case GRPC_ENDPOINT_ERROR: gpr_log(GPR_ERROR, "endpoint got error"); abort(); } gpr_slice_buffer_destroy(&outgoing); grpc_endpoint_destroy(ep); gpr_free(slices); }
/*
 * (exec_ctx + resource-quota grpc API variant)
 *
 * Creates a socketpair, wraps sv[1] as an endpoint under a dedicated
 * resource quota, writes num_bytes of generated data in slice_size slices,
 * and drains sv[0] blocking while pumping g_pollset (unlocking around
 * grpc_exec_ctx_finish each iteration) until the write_done callback sets
 * state.write_done.  Destroys the slice buffer, endpoint and slice array,
 * then finishes the exec_ctx.
 */
/* Write to a socket using the grpc_tcp API, then drain it directly. Note that if the write does not complete immediately we need to drain the socket in parallel with the read. */ static void write_test(size_t num_bytes, size_t slice_size) { int sv[2]; grpc_endpoint *ep; struct write_socket_state state; size_t num_blocks; grpc_slice *slices; uint8_t current_data = 0; grpc_slice_buffer outgoing; grpc_closure write_done_closure; gpr_timespec deadline = grpc_timeout_seconds_to_deadline(20); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; gpr_log(GPR_INFO, "Start write test with %" PRIuPTR " bytes, slice size %" PRIuPTR, num_bytes, slice_size); create_sockets(sv); grpc_resource_quota *resource_quota = grpc_resource_quota_create("write_test"); ep = grpc_tcp_create(grpc_fd_create(sv[1], "write_test"), resource_quota, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "test"); grpc_resource_quota_unref_internal(&exec_ctx, resource_quota); grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset); state.ep = ep; state.write_done = 0; slices = allocate_blocks(num_bytes, slice_size, &num_blocks, &current_data); grpc_slice_buffer_init(&outgoing); grpc_slice_buffer_addn(&outgoing, slices, num_blocks); grpc_closure_init(&write_done_closure, write_done, &state, grpc_schedule_on_exec_ctx); grpc_endpoint_write(&exec_ctx, ep, &outgoing, &write_done_closure); drain_socket_blocking(sv[0], num_bytes, num_bytes); gpr_mu_lock(g_mu); for (;;) { grpc_pollset_worker *worker = NULL; if (state.write_done) { break; } GPR_ASSERT(GRPC_LOG_IF_ERROR( "pollset_work", grpc_pollset_work(&exec_ctx, g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC), deadline))); gpr_mu_unlock(g_mu); grpc_exec_ctx_finish(&exec_ctx); gpr_mu_lock(g_mu); } gpr_mu_unlock(g_mu); grpc_slice_buffer_destroy_internal(&exec_ctx, &outgoing); grpc_endpoint_destroy(&exec_ctx, ep); gpr_free(slices); grpc_exec_ctx_finish(&exec_ctx); }
/* End2end fixture factory for the socketpair transport: allocates the
   fixture data, creates the fd pair inside it, and returns a zeroed
   fixture carrying that data plus a fresh completion queue.  The
   client/server channel args are not consumed at this stage. */
static grpc_end2end_test_fixture chttp2_create_fixture_socketpair(
    grpc_channel_args *client_args, grpc_channel_args *server_args) {
  grpc_end2end_test_fixture f;
  sp_fixture_data *sfd = gpr_malloc(sizeof(*sfd));

  create_sockets(sfd->fd_pair);

  memset(&f, 0, sizeof(f));
  f.fixture_data = sfd;
  f.cq = grpc_completion_queue_create(NULL);
  return f;
}
/*
 * (exec_ctx grpc API variant, pre-resource-quota)
 *
 * Creates a socketpair, wraps sv[1] as an endpoint, writes num_bytes of
 * generated data in slice_size slices, and drains sv[0] blocking while
 * pumping g_pollset (unlocking around grpc_exec_ctx_finish each iteration)
 * until the write_done callback sets state.write_done.  Destroys the slice
 * buffer, endpoint and slice array, then finishes the exec_ctx.
 */
/* Write to a socket using the grpc_tcp API, then drain it directly. Note that if the write does not complete immediately we need to drain the socket in parallel with the read. */ static void write_test(size_t num_bytes, size_t slice_size) { int sv[2]; grpc_endpoint *ep; struct write_socket_state state; size_t num_blocks; gpr_slice *slices; gpr_uint8 current_data = 0; gpr_slice_buffer outgoing; grpc_closure write_done_closure; gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; gpr_log(GPR_INFO, "Start write test with %d bytes, slice size %d", num_bytes, slice_size); create_sockets(sv); ep = grpc_tcp_create(grpc_fd_create(sv[1], "write_test"), GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "test"); grpc_endpoint_add_to_pollset(&exec_ctx, ep, &g_pollset); state.ep = ep; state.write_done = 0; slices = allocate_blocks(num_bytes, slice_size, &num_blocks, &current_data); gpr_slice_buffer_init(&outgoing); gpr_slice_buffer_addn(&outgoing, slices, num_blocks); grpc_closure_init(&write_done_closure, write_done, &state); grpc_endpoint_write(&exec_ctx, ep, &outgoing, &write_done_closure); drain_socket_blocking(sv[0], num_bytes, num_bytes); gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); for (;;) { grpc_pollset_worker worker; if (state.write_done) { break; } grpc_pollset_work(&exec_ctx, &g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC), deadline); gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); grpc_exec_ctx_finish(&exec_ctx); gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); } gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); gpr_slice_buffer_destroy(&outgoing); grpc_endpoint_destroy(&exec_ctx, ep); gpr_free(slices); grpc_exec_ctx_finish(&exec_ctx); }
/* Initialize the script-control module: create the spin lock guarding
   shared state, then open the control sockets.  Returns the status of
   create_sockets() on success, MTC_ERROR_SC_PTHREAD if the lock cannot
   be created. */
MTC_STATIC MTC_STATUS script_initialize0(void)
{
    int err = pthread_spin_init(&lock, 0);

    /* Fault-injection hook: force a pthread failure when enabled. */
    if (fist_on("sc.pthread")) {
        err = FIST_PTHREAD_ERRCODE;
    }

    if (err) {
        log_internal(MTC_LOG_ERR,
                     "SC: cannot create spin_lock. (sys %d)\n", err);
        return MTC_ERROR_SC_PTHREAD;
    }

    return create_sockets();
}
/* Endpoint-test fixture backed by a local socketpair: fds[0] is wrapped
   as the client endpoint, fds[1] as the server, and both endpoints are
   registered with the shared test pollset. */
static grpc_endpoint_test_fixture create_fixture_tcp_socketpair(
    size_t slice_size) {
  grpc_endpoint_test_fixture fixture;
  int fds[2];

  create_sockets(fds);
  fixture.client_ep = grpc_tcp_create(grpc_fd_create(fds[0], "fixture:client"),
                                      slice_size, "test");
  fixture.server_ep = grpc_tcp_create(grpc_fd_create(fds[1], "fixture:server"),
                                      slice_size, "test");
  grpc_endpoint_add_to_pollset(fixture.client_ep, &g_pollset);
  grpc_endpoint_add_to_pollset(fixture.server_ep, &g_pollset);
  return fixture;
}
/*
 * (exec_ctx + resource-quota grpc API variant)
 *
 * Fills sv[0] until the kernel buffer is full, wraps sv[1] as an endpoint
 * under a dedicated resource quota, then issues a single endpoint read and
 * pumps g_pollset (unlocking around grpc_exec_ctx_finish each iteration)
 * until read_cb has accumulated exactly the number of bytes written.
 * Cleans up the incoming slice buffer and the endpoint, then finishes the
 * exec_ctx.
 */
/* Write to a socket until it fills up, then read from it using the grpc_tcp API. */ static void large_read_test(size_t slice_size) { int sv[2]; grpc_endpoint *ep; struct read_socket_state state; ssize_t written_bytes; gpr_timespec deadline = grpc_timeout_seconds_to_deadline(20); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; gpr_log(GPR_INFO, "Start large read test, slice size %" PRIuPTR, slice_size); create_sockets(sv); grpc_resource_quota *resource_quota = grpc_resource_quota_create("large_read_test"); ep = grpc_tcp_create(grpc_fd_create(sv[1], "large_read_test"), resource_quota, slice_size, "test"); grpc_resource_quota_unref_internal(&exec_ctx, resource_quota); grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset); written_bytes = fill_socket(sv[0]); gpr_log(GPR_INFO, "Wrote %" PRIuPTR " bytes", written_bytes); state.ep = ep; state.read_bytes = 0; state.target_read_bytes = (size_t)written_bytes; grpc_slice_buffer_init(&state.incoming); grpc_closure_init(&state.read_cb, read_cb, &state, grpc_schedule_on_exec_ctx); grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb); gpr_mu_lock(g_mu); while (state.read_bytes < state.target_read_bytes) { grpc_pollset_worker *worker = NULL; GPR_ASSERT(GRPC_LOG_IF_ERROR( "pollset_work", grpc_pollset_work(&exec_ctx, g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC), deadline))); gpr_mu_unlock(g_mu); grpc_exec_ctx_finish(&exec_ctx); gpr_mu_lock(g_mu); } GPR_ASSERT(state.read_bytes == state.target_read_bytes); gpr_mu_unlock(g_mu); grpc_slice_buffer_destroy_internal(&exec_ctx, &state.incoming); grpc_endpoint_destroy(&exec_ctx, ep); grpc_exec_ctx_finish(&exec_ctx); }
/*
 * (old, callback-list grpc API variant)
 *
 * Fills sv[0] until the kernel buffer is full, wraps sv[1] as an endpoint,
 * then issues a single endpoint read: a DONE/ERROR result invokes read_cb
 * synchronously, PENDING is driven by pumping the pollset until read_cb has
 * accumulated exactly the number of bytes written.  Cleans up the incoming
 * slice buffer and the endpoint.
 */
/* Write to a socket until it fills up, then read from it using the grpc_tcp API. */ static void large_read_test(ssize_t slice_size) { int sv[2]; grpc_endpoint *ep; struct read_socket_state state; ssize_t written_bytes; gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20); gpr_log(GPR_INFO, "Start large read test, slice size %d", slice_size); create_sockets(sv); ep = grpc_tcp_create(grpc_fd_create(sv[1], "large_read_test"), slice_size, "test"); grpc_endpoint_add_to_pollset(ep, &g_pollset); written_bytes = fill_socket(sv[0]); gpr_log(GPR_INFO, "Wrote %d bytes", written_bytes); state.ep = ep; state.read_bytes = 0; state.target_read_bytes = written_bytes; gpr_slice_buffer_init(&state.incoming); grpc_iomgr_closure_init(&state.read_cb, read_cb, &state); switch (grpc_endpoint_read(ep, &state.incoming, &state.read_cb)) { case GRPC_ENDPOINT_DONE: read_cb(&state, 1); break; case GRPC_ENDPOINT_ERROR: read_cb(&state, 0); break; case GRPC_ENDPOINT_PENDING: break; } gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); while (state.read_bytes < state.target_read_bytes) { grpc_pollset_worker worker; grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC), deadline); } GPR_ASSERT(state.read_bytes == state.target_read_bytes); gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); gpr_slice_buffer_destroy(&state.incoming); grpc_endpoint_destroy(ep); }
/*
 * Creates a connected client/server endpoint pair over a local socketpair,
 * naming each underlying fd "<name>:client" / "<name>:server".
 *
 * NOTE(review): the last argument to grpc_tcp_create appears to be the
 * endpoint's *peer* string, which would make the cross-labelling here
 * ("socketpair-server" on the client endpoint and vice versa) intentional
 * rather than a swap — confirm against grpc_tcp_create's declaration.
 */
grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name, size_t read_slice_size) { int sv[2]; grpc_endpoint_pair p; char *final_name; create_sockets(sv); gpr_asprintf(&final_name, "%s:client", name); p.client = grpc_tcp_create(grpc_fd_create(sv[1], final_name), read_slice_size, "socketpair-server"); gpr_free(final_name); gpr_asprintf(&final_name, "%s:server", name); p.server = grpc_tcp_create(grpc_fd_create(sv[0], final_name), read_slice_size, "socketpair-client"); gpr_free(final_name); return p; }
/*
 * Client-side initialization based on command line args.
 *
 * Registers cleanup, optionally redirects stderr into the configured log
 * file (appending, mode 0644), initializes keys and sockets, then derives
 * the packet-size budget: payloadsize from the MTU minus IP/UDP/uftp
 * headers, encpayloadsize leaving room for the encryption header, key
 * block, and RSA/HMAC signature, and blocksize for the file-segment data.
 * Finally fixes up client_auth (never with no encryption, always with RSA
 * signatures) and computes packet_wait from the configured rate.
 *
 * NOTE(review): the "28" is IPv4+UDP header size per the comment; IPv6 is
 * not accounted for here — confirm whether this build is IPv4-only.
 */
/** * Initialization based on command line args */ void initialize() { atexit(cleanup); if (strcmp(logfile, "")) { int fd; if ((fd = open(logfile, O_WRONLY | O_APPEND | O_CREAT, 0644)) == -1) { perror("Can't open log file"); exit(1); } dup2(fd, 2); close(fd); showtime = 1; } applog = stderr; key_init(); create_sockets(); // Size of non-data packets including message specific header. // sizeof(ip) + sizeof(udp) = 20 + 8 = 28 payloadsize = mtu - 28 - sizeof(struct uftp_h); // Size of encrypted packets // Leaves room for expansion due to symmetric key block size encpayloadsize = payloadsize - sizeof(struct encrypted_h) - KEYBLSIZE - ((sigtype == SIG_RSA) ? rsalen : hmaclen ); // Size of data block blocksize = ((keytype != KEY_NONE) ? encpayloadsize : payloadsize) - sizeof(struct fileseg_h); // Never ask for a client key with no encryption, // and always ask with RSA signatures if (keytype == KEY_NONE) { client_auth = 0; } else if (sigtype == SIG_RSA) { client_auth = 1; } if (rate == -1) { packet_wait = 0; } else { packet_wait = (int32_t)(1000000.0 * mtu / ((float)rate * 1024 / 8)); } }
static int gmond_init(void) /* {{{ */ { create_sockets( &mc_send_sockets, &mc_send_sockets_num, (mc_receive_group != NULL) ? mc_receive_group : MC_RECEIVE_GROUP_DEFAULT, (mc_receive_port != NULL) ? mc_receive_port : MC_RECEIVE_PORT_DEFAULT, /* listen = */ 0); staging_tree = c_avl_create((int (*)(const void *, const void *))strcmp); if (staging_tree == NULL) { ERROR("gmond plugin: c_avl_create failed."); return (-1); } mc_receive_thread_start(); return (0); } /* }}} int gmond_init */
static grpc_endpoint_test_fixture create_fixture_tcp_socketpair( size_t slice_size) { int sv[2]; grpc_endpoint_test_fixture f; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; create_sockets(sv); f.client_ep = grpc_tcp_create(grpc_fd_create(sv[0], "fixture:client"), slice_size, "test"); f.server_ep = grpc_tcp_create(grpc_fd_create(sv[1], "fixture:server"), slice_size, "test"); grpc_endpoint_add_to_pollset(&exec_ctx, f.client_ep, &g_pollset); grpc_endpoint_add_to_pollset(&exec_ctx, f.server_ep, &g_pollset); grpc_exec_ctx_finish(&exec_ctx); return f; }
/*
 * (exec_ctx grpc API variant, pre-resource-quota)
 *
 * Fills sv[0] until the kernel buffer is full, wraps sv[1] as an endpoint,
 * issues a single endpoint read, and pumps g_pollset (unlocking around
 * grpc_exec_ctx_finish each iteration) until read_cb has accumulated
 * exactly the number of bytes written.  Cleans up the incoming slice
 * buffer and the endpoint, then finishes the exec_ctx.
 */
/* Write to a socket until it fills up, then read from it using the grpc_tcp API. */ static void large_read_test(size_t slice_size) { int sv[2]; grpc_endpoint *ep; struct read_socket_state state; ssize_t written_bytes; gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; gpr_log(GPR_INFO, "Start large read test, slice size %d", slice_size); create_sockets(sv); ep = grpc_tcp_create(grpc_fd_create(sv[1], "large_read_test"), slice_size, "test"); grpc_endpoint_add_to_pollset(&exec_ctx, ep, &g_pollset); written_bytes = fill_socket(sv[0]); gpr_log(GPR_INFO, "Wrote %d bytes", written_bytes); state.ep = ep; state.read_bytes = 0; state.target_read_bytes = (size_t)written_bytes; gpr_slice_buffer_init(&state.incoming); grpc_closure_init(&state.read_cb, read_cb, &state); grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb); gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); while (state.read_bytes < state.target_read_bytes) { grpc_pollset_worker worker; grpc_pollset_work(&exec_ctx, &g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC), deadline); gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); grpc_exec_ctx_finish(&exec_ctx); gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); } GPR_ASSERT(state.read_bytes == state.target_read_bytes); gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); gpr_slice_buffer_destroy(&state.incoming); grpc_endpoint_destroy(&exec_ctx, ep); grpc_exec_ctx_finish(&exec_ctx); }
static grpc_endpoint_test_fixture create_fixture_tcp_socketpair( size_t slice_size) { int sv[2]; grpc_endpoint_test_fixture f; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; create_sockets(sv); grpc_resource_quota *resource_quota = grpc_resource_quota_create("tcp_posix_test_socketpair"); f.client_ep = grpc_tcp_create(grpc_fd_create(sv[0], "fixture:client"), resource_quota, slice_size, "test"); f.server_ep = grpc_tcp_create(grpc_fd_create(sv[1], "fixture:server"), resource_quota, slice_size, "test"); grpc_resource_quota_unref_internal(&exec_ctx, resource_quota); grpc_endpoint_add_to_pollset(&exec_ctx, f.client_ep, g_pollset); grpc_endpoint_add_to_pollset(&exec_ctx, f.server_ep, g_pollset); grpc_exec_ctx_finish(&exec_ctx); return f; }
/*
 * Frame-forwarder main loop: reads frames from an input socket bound to
 * "wlan0", translates/schedules/queues each one, doles queued frames out
 * on the output socket bound to "lo", and sleeps via poll() on the input
 * fd for the time-to-next-frame reported by ttn_frame().
 *
 * Runs forever; the trailing return is unreachable.
 */
int main(void)
{
    int input, output;
    create_sockets(&input, &output);
    bind_sockets("wlan0", input, "lo", output);

    /* main program part.. */
    frame_t work;
    struct pollfd pollinput;
    pollinput.fd = input;
    pollinput.events = POLLIN;
    int mswait;

    while ( 1 )
    {
        printf("%40d frames in buffer.\r", count_frames());
        /* BUG FIX: a "\r"-terminated status line on line-buffered stdout
         * is never flushed, so the progress display never appeared. */
        fflush(stdout);

        work.length = read(input, work.data.buffer, BIGGEST_FRAME);
        if ( work.length > 0 )
        {
            translate_frame(&work);
            schedule_frame(&work);
            queue_frame(&work);
            // freaking woo hoo
        }
        dole_frame(output);

        /* Wait until the next scheduled frame is due, waking early if
         * more input arrives. */
        mswait = ttn_frame();
        poll(&pollinput, 1, mswait);
    }
    return 0;
}
/*
 * Server-side initialization based on command line args.
 *
 * Registers cleanup and logging, installs signal handling (console handler
 * on Windows; SIGINT/SIGTERM handled, SIGPIPE/SIGCHLD ignored elsewhere),
 * opens the status file ("@LOG" aliases the application log), initializes
 * keys and sockets, then computes datapacketsize from blocksize plus the
 * file-segment header, optional TFMCC and encryption overhead, and the
 * UDP + IPv4/IPv6 header sizes.  Fixes up client_auth (never with no
 * encryption, always with key-exchange signatures) and derives packet_wait
 * from the configured rate for the chosen congestion-control mode.
 */
/** * Initialization based on command line args */ void initialize(void) { atexit(cleanup); init_log_mux = 1; init_log(0); #ifdef WINDOWS SetConsoleCtrlHandler(winsig, TRUE); #else { struct sigaction act; sigfillset(&act.sa_mask); act.sa_flags = SA_NOCLDSTOP | SA_NOCLDWAIT | SA_RESTART; act.sa_handler = gotsig; sigaction(SIGINT, &act, NULL); sigaction(SIGTERM, &act, NULL); act.sa_handler = SIG_IGN; sigaction(SIGPIPE, &act, NULL); sigaction(SIGCHLD, &act, NULL); } #endif if (!strcmp(statusfilename, "@LOG")) { status_file = applog; } else if (strcmp(statusfilename, "")) { if ((status_file = fopen(statusfilename, "at")) == NULL) { perror("Can't open status file"); exit(ERR_PARAM); } } key_init(); create_sockets(); // Size of data packet, used in transmission speed calculations datapacketsize = blocksize + sizeof(struct fileseg_h); if (cc_type == CC_TFMCC) { datapacketsize += sizeof(struct tfmcc_data_info_he); } if (keytype != KEY_NONE) { datapacketsize += ((sigtype == SIG_KEYEX) ? privkeylen : (sigtype == SIG_HMAC) ? hmaclen : 0) + KEYBLSIZE + sizeof(struct encrypted_h); } // 8 = UDP size, 20 = IPv4 size, 40 = IPv6 size if (listen_dest.ss.ss_family == AF_INET6) { datapacketsize += sizeof(struct uftp_h) + 8 + 40; } else { datapacketsize += sizeof(struct uftp_h) + 8 + 20; } // Never ask for a client key with no encryption, // and always ask with RSA/ECDSA signatures if (keytype == KEY_NONE) { client_auth = 0; } else if (sigtype == SIG_KEYEX) { client_auth = 1; } if (cc_type == CC_NONE || cc_type == CC_UFTP3) { if (rate == -1) { packet_wait = 0; } else { packet_wait = (int32_t)(1000000.0 * datapacketsize / rate); } } else if (cc_type == CC_TFMCC) { // Initialize the rate to the default rate for control message timing packet_wait = (int32_t)(1000000.0 * datapacketsize / rate); } }
/*
 * (resource-quota grpc API variant; spans a statement across source lines —
 * left byte-identical below.)
 *
 * Partially fills sv[0] with num_bytes, wraps sv[1] as an endpoint (after
 * asserting grpc_tcp_fd() still exposes sv[1]), reads until all written
 * bytes arrive while pumping g_pollset, then destroys the endpoint via
 * grpc_tcp_destroy_and_release_fd and waits for the fd-released callback.
 * Finally verifies the released fd is sv[1] and still usable in both
 * directions via fill_socket_partial/drain_socket_blocking, then closes it.
 */
/* Do a read_test, then release fd and try to read/write again. Verify that grpc_tcp_fd() is available before the fd is released. */ static void release_fd_test(size_t num_bytes, size_t slice_size) { int sv[2]; grpc_endpoint *ep; struct read_socket_state state; size_t written_bytes; int fd; gpr_timespec deadline = grpc_timeout_seconds_to_deadline(20); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_closure fd_released_cb; int fd_released_done = 0; grpc_closure_init(&fd_released_cb, &on_fd_released, &fd_released_done, grpc_schedule_on_exec_ctx); gpr_log(GPR_INFO, "Release fd read_test of size %" PRIuPTR ", slice size %" PRIuPTR, num_bytes, slice_size); create_sockets(sv); grpc_resource_quota *resource_quota = grpc_resource_quota_create("release_fd_test"); ep = grpc_tcp_create(grpc_fd_create(sv[1], "read_test"), resource_quota, slice_size, "test"); GPR_ASSERT(grpc_tcp_fd(ep) == sv[1] && sv[1] >= 0); grpc_resource_quota_unref_internal(&exec_ctx, resource_quota); grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset); written_bytes = fill_socket_partial(sv[0], num_bytes); gpr_log(GPR_INFO, "Wrote %" PRIuPTR " bytes", written_bytes); state.ep = ep; state.read_bytes = 0; state.target_read_bytes = written_bytes; grpc_slice_buffer_init(&state.incoming); grpc_closure_init(&state.read_cb, read_cb, &state, grpc_schedule_on_exec_ctx); grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb); gpr_mu_lock(g_mu); while (state.read_bytes < state.target_read_bytes) { grpc_pollset_worker *worker = NULL; GPR_ASSERT(GRPC_LOG_IF_ERROR( "pollset_work", grpc_pollset_work(&exec_ctx, g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC), deadline))); gpr_log(GPR_DEBUG, "wakeup: read=%" PRIdPTR " target=%" PRIdPTR, state.read_bytes, state.target_read_bytes); gpr_mu_unlock(g_mu); grpc_exec_ctx_flush(&exec_ctx); gpr_mu_lock(g_mu); } GPR_ASSERT(state.read_bytes == state.target_read_bytes); gpr_mu_unlock(g_mu); grpc_slice_buffer_destroy_internal(&exec_ctx, &state.incoming); 
grpc_tcp_destroy_and_release_fd(&exec_ctx, ep, &fd, &fd_released_cb); grpc_exec_ctx_flush(&exec_ctx); gpr_mu_lock(g_mu); while (!fd_released_done) { grpc_pollset_worker *worker = NULL; GPR_ASSERT(GRPC_LOG_IF_ERROR( "pollset_work", grpc_pollset_work(&exec_ctx, g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC), deadline))); gpr_log(GPR_DEBUG, "wakeup: fd_released_done=%d", fd_released_done); } gpr_mu_unlock(g_mu); GPR_ASSERT(fd_released_done == 1); GPR_ASSERT(fd == sv[1]); grpc_exec_ctx_finish(&exec_ctx); written_bytes = fill_socket_partial(sv[0], num_bytes); drain_socket_blocking(fd, written_bytes, written_bytes); written_bytes = fill_socket_partial(fd, num_bytes); drain_socket_blocking(sv[0], written_bytes, written_bytes); close(fd); }
/*
 * sipsak main send/receive loop.
 *
 * Initializes transport timers, counters, buffers and the socket pair,
 * applies $dsthost$/$srchost$/$port$/$user$ replacements and user-supplied
 * string substitutions to the request, compiles the SIP reply-class
 * regexes, derives the effective username, and builds the initial message
 * for the selected mode (usrloc/invite/message, trace, flood, randtrash,
 * or plain/file-based request).  The endless loop then sends the request
 * (with optional fixed or random inter-send sleep), and — except in flood
 * mode — processes replies: ACKs non-provisional INVITE answers, ignores
 * stale-CSeq retransmissions, handles 401/407 authentication (including
 * the timing mode that prints min/avg/max delays and exits), 3xx
 * redirects, and per-mode handlers; recv_message() returning -1/-2 causes
 * a retransmit or an ignore respectively, other failures abort with a
 * socket error.  Flood mode only counts sends and reports the rate when
 * nameend is reached.  Exit is always via exit_code(); the code after the
 * loop handles retransmission-exhaustion reporting.
 *
 * NOTE: several string literals in this function are split across physical
 * source lines below; the body is left byte-identical for that reason.
 */
/* this is the main function with the loops and modes */ void shoot(char *buf, int buff_size) { struct timespec sleep_ms_s, sleep_rem; int ret, cseqtmp, rand_tmp; char buf2[BUFSIZE], buf3[BUFSIZE], lport_str[LPORT_STR_LEN]; /* delays.retryAfter = DEFAULT_TIMEOUT; */ if (transport == SIP_UDP_TRANSPORT) { delays.retryAfter = timer_t1; } else { delays.retryAfter = timer_final; } inv_trans = 0; cseq_counter = 1; usrlocstep = REG_REP; /* initalize local vars */ cdata.dontsend=cdata.dontrecv=counters.retrans_r_c=counters.retrans_s_c= 0; delays.big_delay=counters.send_counter=counters.run= 0; timers.delaytime.tv_sec = 0; timers.delaytime.tv_usec = 0; usern = NULL; /* initialize local arrays */ memset(buf2, 0, BUFSIZE); memset(buf3, 0, BUFSIZE); memset(lport_str, 0, LPORT_STR_LEN); cdata.csock = cdata.usock = -1; cdata.connected = 0; cdata.buf_tmp = NULL; cdata.buf_tmp_size = 0; memset(&(timers.sendtime), 0, sizeof(timers.sendtime)); memset(&(timers.recvtime), 0, sizeof(timers.recvtime)); memset(&(timers.firstsendt), 0, sizeof(timers.firstsendt)); memset(&(timers.starttime), 0, sizeof(timers.starttime)); memset(&(timers.delaytime), 0, sizeof(timers.delaytime)); req = buf; rep = buf2; rec = buf3; create_sockets(&cdata); if (sleep_ms != 0) { if (sleep_ms == -2) { rand_tmp = rand(); sleep_ms_s.tv_sec = rand_tmp / 1000; sleep_ms_s.tv_nsec = (rand_tmp % 1000) * 1000000; } else { sleep_ms_s.tv_sec = sleep_ms / 1000; sleep_ms_s.tv_nsec = (sleep_ms % 1000) * 1000000; } } if (replace_b == 1){ replace_string(req, "$dsthost$", domainname); replace_string(req, "$srchost$", fqdn); sprintf(lport_str, "%i", lport); replace_string(req, "$port$", lport_str); if (username) replace_string(req, "$user$", username); } if (replace_str) replace_strings(req, replace_str); /* set all regular expression to simplfy the result code indetification */ regcomp(&(regexps.replyexp), "^SIP/[0-9]\\.[0-9] [1-6][0-9][0-9]", REG_EXTENDED|REG_NOSUB|REG_ICASE); regcomp(&(regexps.proexp), "^SIP/[0-9]\\.[0-9] 
1[0-9][0-9] ", REG_EXTENDED|REG_NOSUB|REG_ICASE); regcomp(&(regexps.okexp), "^SIP/[0-9]\\.[0-9] 2[0-9][0-9] ", REG_EXTENDED|REG_NOSUB|REG_ICASE); regcomp(&(regexps.redexp), "^SIP/[0-9]\\.[0-9] 3[0-9][0-9] ", REG_EXTENDED|REG_NOSUB|REG_ICASE); regcomp(&(regexps.authexp), "^SIP/[0-9]\\.[0-9] 40[17] ", REG_EXTENDED|REG_NOSUB|REG_ICASE); regcomp(&(regexps.errexp), "^SIP/[0-9]\\.[0-9] 4[0-9][0-9] ", REG_EXTENDED|REG_NOSUB|REG_ICASE); regcomp(&(regexps.tmhexp), "^SIP/[0-9]\\.[0-9] 483 ", REG_EXTENDED|REG_NOSUB|REG_ICASE); if (username) { if (nameend > 0) { usern = str_alloc(strlen(username) + 12); create_usern(usern, username, namebeg); } else { if (*(username + strlen(username) - 1) != '@') { usern = str_alloc(strlen(username) + 2); create_usern(usern, username, -1); } else { usern = username; } } } if (usrloc == 1||invite == 1||message == 1){ /* calculate the number of required steps and create initial mes */ if (usrloc == 1) { create_msg(REQ_REG, req, NULL, usern, cseq_counter); usrlocstep=REG_REP; } else if (invite == 1) { create_msg(REQ_INV, req, rep, usern, cseq_counter); inv_trans = 1; usrlocstep=INV_RECV; } else { create_msg(REQ_MES, req, rep, usern, cseq_counter); if (mes_body) usrlocstep=MES_OK_RECV; else usrlocstep=MES_RECV; } } else if (trace == 1){ /* for trace we need some spezial initis */ namebeg=0; create_msg(REQ_OPT, req, NULL, usern, cseq_counter); set_maxforw(req, namebeg); } else if (flood == 1){ if (nameend<=0) nameend=INT_MAX; namebeg=1; create_msg(REQ_FLOOD, req, NULL, usern, cseq_counter); } else if (randtrash == 1){ counters.randretrys=0; namebeg=1; create_msg(REQ_RAND, req, NULL, usern, cseq_counter); nameend=(int)strlen(req); if (trashchar == 1){ if (trashchar < nameend) nameend=trashchar; else fprintf(stderr, "warning: number of trashed chars to big. 
setting to " "request length\n"); } trash_random(req); } else { /* for none of the modes we also need some inits */ if (file_b == 0) { namebeg=1; create_msg(REQ_OPT, req, NULL, usern, cseq_counter); } else { if (STRNCASECMP(req, INV_STR, INV_STR_LEN) == 0) { inv_trans = 1; } if(via_ins == 1) add_via(req); } /* delays.retryAfter = delays.retryAfter / 10; */ if(maxforw!=-1) set_maxforw(req, maxforw); } cdata.connected = set_target(&(cdata.adr), address, rport, cdata.csock, cdata.connected); /* here we go until someone decides to exit */ while(1) { before_sending(); if (sleep_ms == -2) { rand_tmp = rand(); sleep_ms_s.tv_sec = rand_tmp / 1000; sleep_ms_s.tv_nsec = (rand_tmp % 1000) * 1000; } if (sleep_ms != 0) { dbg("sleeping for %li s + %li ns\n", sleep_ms_s.tv_sec, sleep_ms_s.tv_nsec); nanosleep(&sleep_ms_s, &sleep_rem); } send_message(req, &cdata, &counters, &timers); /* in flood we are only interested in sending so skip the rest */ if (flood == 0) { ret = recv_message(rec, BUFSIZE, inv_trans, &delays, &timers, &counters, &cdata, &regexps); if(ret > 0) { if (usrlocstep == INV_OK_RECV) { swap_ptr(&rep, &req); } /* send ACK for non-provisional reply on INVITE */ if ((STRNCASECMP(req, "INVITE", 6)==0) && (regexec(&(regexps.replyexp), rec, 0, 0, 0) == REG_NOERROR) && (regexec(&(regexps.proexp), rec, 0, 0, 0) == REG_NOMATCH)) { build_ack(req, rec, rep, &regexps); cdata.dontsend = 0; inv_trans = 0; /* lets fire the ACK to the server */ send_message(rep, &cdata, &counters, &timers); inv_trans = 1; } /* check for old CSeq => ignore retransmission */ cseqtmp = cseq(rec); if ((0 < cseqtmp) && (cseqtmp < cseq_counter)) { if (verbose>0) { printf("ignoring retransmission\n"); } counters.retrans_r_c++; cdata.dontsend = 1; continue; } else if (regexec(&(regexps.authexp), rec, 0, 0, 0) == REG_NOERROR) { if (!username && !auth_username) { if (timing > 0) { timing--; if (timing == 0) { if (counters.run == 0) { counters.run++; } printf("%.3f/%.3f/%.3f ms\n", delays.small_delay, 
delays.all_delay / counters.run, delays.big_delay); exit_code(0, __PRETTY_FUNCTION__, NULL); } counters.run++; new_transaction(req, rep); delays.retryAfter = timer_t1; continue; } fprintf(stderr, "%s\nerror: received 40[17] but cannot " "authentication without a username or auth username\n", rec); log_message(req); exit_code(2, __PRETTY_FUNCTION__, "missing username for authentication"); } /* prevents a strange error */ regcomp(&(regexps.authexp), "^SIP/[0-9]\\.[0-9] 40[17] ", REG_EXTENDED|REG_NOSUB|REG_ICASE); insert_auth(req, rec); if (verbose > 2) printf("\nreceived:\n%s\n", rec); new_transaction(req, rep); continue; } /* if auth...*/ /* lets see if received a redirect */ if (redirects == 1 && regexec(&(regexps.redexp), rec, 0, 0, 0) == REG_NOERROR) { handle_3xx(&(cdata.adr)); } /* if redircts... */ else if (trace == 1) { trace_reply(); } /* if trace ... */ else if (usrloc == 1||invite == 1||message == 1) { handle_usrloc(); } else if (randtrash == 1) { handle_randtrash(); } else { handle_default(); } /* redirect, auth, and modes */ } /* ret > 0 */ else if (ret == -1) { // we did not got anything back, send again /* no re-transmission on reliable transports */ if (transport != SIP_UDP_TRANSPORT) { cdata.dontsend = 1; } continue; } else if (ret == -2) { // we received non-matching ICMP cdata.dontsend = 1; continue; } else { if (usrloc == 1) { printf("failed\n"); } perror("socket error"); exit_code(3, __PRETTY_FUNCTION__, "internal socket error"); } } /* !flood */ else { if (counters.send_counter == 1) { memcpy(&(timers.firstsendt), &(timers.sendtime), sizeof(struct timeval)); } if (namebeg==nameend) { printf("flood end reached\n"); printf("it took %.3f ms seconds to send %i request.\n", deltaT(&(timers.firstsendt), &(timers.sendtime)), namebeg); printf("we sent %f requests per second.\n", (namebeg/(deltaT(&(timers.firstsendt), &(timers.sendtime)))*1000)); exit_code(0, __PRETTY_FUNCTION__, NULL); } namebeg++; cseq_counter++; create_msg(REQ_FLOOD, req, NULL, usern, 
cseq_counter); } } /* while 1 */ /* this should never happen any more... */ if (randtrash == 1) { exit_code(0, __PRETTY_FUNCTION__, NULL); } printf("** give up retransmissioning....\n"); if (counters.retrans_r_c>0 && (verbose > 1)) { printf("%i retransmissions received during test\n", counters.retrans_r_c); } if (counters.retrans_s_c>0 && (verbose > 1)) { printf("sent %i retransmissions during test\n", counters.retrans_s_c); } exit_code(3, __PRETTY_FUNCTION__, "got outside of endless messaging loop"); }
/* Do a read_test, then release fd and try to read/write again. Verify that grpc_tcp_fd() is available before the fd is released. */ static void release_fd_test(size_t num_bytes, size_t slice_size) { int sv[2]; grpc_endpoint *ep; struct read_socket_state state; size_t written_bytes; int fd; gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_closure fd_released_cb; int fd_released_done = 0; grpc_closure_init(&fd_released_cb, &on_fd_released, &fd_released_done); gpr_log(GPR_INFO, "Release fd read_test of size %d, slice size %d", num_bytes, slice_size); create_sockets(sv); ep = grpc_tcp_create(grpc_fd_create(sv[1], "read_test"), slice_size, "test"); GPR_ASSERT(grpc_tcp_fd(ep) == sv[1] && sv[1] >= 0); grpc_endpoint_add_to_pollset(&exec_ctx, ep, &g_pollset); written_bytes = fill_socket_partial(sv[0], num_bytes); gpr_log(GPR_INFO, "Wrote %d bytes", written_bytes); state.ep = ep; state.read_bytes = 0; state.target_read_bytes = written_bytes; gpr_slice_buffer_init(&state.incoming); grpc_closure_init(&state.read_cb, read_cb, &state); grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb); gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); while (state.read_bytes < state.target_read_bytes) { grpc_pollset_worker *worker = NULL; grpc_pollset_work(&exec_ctx, &g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC), deadline); gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); grpc_exec_ctx_finish(&exec_ctx); gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); } GPR_ASSERT(state.read_bytes == state.target_read_bytes); gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); gpr_slice_buffer_destroy(&state.incoming); grpc_tcp_destroy_and_release_fd(&exec_ctx, ep, &fd, &fd_released_cb); gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); while (!fd_released_done) { grpc_pollset_worker *worker = NULL; grpc_pollset_work(&exec_ctx, &g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC), deadline); } gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); GPR_ASSERT(fd_released_done 
== 1); GPR_ASSERT(fd == sv[1]); grpc_exec_ctx_finish(&exec_ctx); written_bytes = fill_socket_partial(sv[0], num_bytes); drain_socket_blocking(fd, written_bytes, written_bytes); written_bytes = fill_socket_partial(fd, num_bytes); drain_socket_blocking(sv[0], written_bytes, written_bytes); close(fd); }
/**
 * Initialization based on command line args.
 *
 * Builds the multicast interface list, validates the dest/backup/temp
 * directory options, sets the default public multicast address, clears the
 * group list and timers, then brings the daemon up (sockets, daemonize).
 * Exits the process with status 1 on any invalid configuration.
 */
void initialize()
{
    char tempf1[MAXPATHNAME], hostname[256];
    struct hostent *hp;
    struct in_addr *addr;
    int fd, i;

    parent = 0;

    // Load list of multicast interfaces
    // (every non-loopback entry from the system interface list ifl)
    if (interface_count == 0) {
        for (i = 0; i < ifl_len; i++) {
            if (!ifl[i].isloopback) {
                m_interface[interface_count++] = ifl[i];
            }
        }
    }
    // No non-loopback interfaces, so just use the hostname's interface
    // NOTE(review): gethostname() may leave hostname unterminated on
    // truncation, and its return value is unchecked — confirm upstream.
    if (interface_count == 0) {
        gethostname(hostname, sizeof(hostname));
        if ((hp = gethostbyname(hostname)) == NULL) {
            fprintf(stderr, "Can't get host name\n");
            exit(1);
        } else {
            // Use the host's first resolved IPv4 address as the interface
            addr = (struct in_addr *)hp->h_addr_list[0];
            m_interface[interface_count].addr = *addr;
            m_interface[interface_count].ismulti = 1;
            m_interface[interface_count++].isloopback = 0;
        }
    }
    // A user-supplied uid is appended as an extra, non-multicast interface
    if (uid) {
        m_interface[interface_count].addr.s_addr = uid;
        m_interface[interface_count].ismulti = 0;
        m_interface[interface_count++].isloopback = 0;
    }

    // Check validity of dest, backup, and temp directories
    for (i = 0; i < destdircnt; i++) {
        if (!isfullpath(destdir[i])) {
            fprintf(stderr, "ERROR: must specify absolute pathname "
                            "for dest directory\n");
            exit(1);
        }
        // Probe writability by creating and removing a scratch file
        snprintf(tempf1, sizeof(tempf1)-1, "%s%c_uftptmp1",destdir[i],PATH_SEP);
        tempf1[sizeof(tempf1)-1] = '\x0';
        if ((fd = open(tempf1, O_WRONLY | O_CREAT, 0644)) < 0) {
            perror("couldn't write to dest directory");
            exit(1);
        }
        close(fd);
        unlink(tempf1);
        if (backupcnt > 0) {
            // backupcnt and destdircnt are always equal
            if (!strcmp(backupdir[i], destdir[i])) {
                fprintf(stderr, "ERROR: corresponding backup dir and dest dir "
                                "must be different\n");
                exit(1);
            }
            if (!isfullpath(backupdir[i])) {
                fprintf(stderr, "ERROR: must specify absolute pathname "
                                "for backup directory\n");
                exit(1);
            }
            // Backup dir must allow rename()-style moves from the dest dir
            if (!dirs_movable(destdir[i], backupdir[i])) {
                exit(1);
            }
        }
    }
    // A temp directory is only valid with a single dest dir and no backups
    if (strcmp(tempdir, "")) {
        if (destdircnt > 1) {
            fprintf(stderr, "ERROR: Cannot use a temp directory "
                            "with multiple dest directories\n");
            exit(1);
        }
        if (backupcnt > 0) {
            fprintf(stderr, "ERROR: Cannot use a temp directory "
                            "with a backup directory\n");
            exit(1);
        }
        if (!strcmp(tempdir, destdir[0])) {
            fprintf(stderr, "ERROR: temp dir and dest dir must be different\n");
            exit(1);
        }
        if (!isfullpath(tempdir)) {
            fprintf(stderr, "ERROR: must specify absolute pathname "
                            "for temp directory\n");
            exit(1);
        }
        if (!dirs_movable(tempdir, destdir[0])) {
            exit(1);
        }
    }
    // Default public announce address when none was given on the command line
    if (!pub_multi_count) {
        pub_multi[0].s_addr = inet_addr(DEF_PUB_MULTI);
        pub_multi_count = 1;
    }
    // group_id == 0 marks a group-list slot as free
    for (i = 0; i < MAXLIST; i++) {
        group_list[i].group_id = 0;
    }
    next_hb_time.tv_sec = 0;
    next_hb_time.tv_usec = 0;
    next_keyreq_time.tv_sec = 0;
    next_keyreq_time.tv_usec = 0;
    // Bring the daemon up: cleanup handler, crypto, sockets, background
    atexit(cleanup);
    key_init();
    create_sockets();
    daemonize();
    showtime = 1;
}
/**
 * Initialization based on command line args.
 *
 * Builds the multicast interface list (falling back to the hostname's
 * address resolved via getaddrinfo), derives a default uid from the first
 * interface address, validates the dest/backup/temp directory and
 * postreceive options, sets the default public multicast address, clears
 * the group list and timers, then brings the daemon up (sockets,
 * daemonize) and opens the status file. Exits with ERR_PARAM on any
 * invalid configuration.
 */
void initialize(void)
{
    char tempf1[MAXPATHNAME], hostname[256];
    struct addrinfo ai_hints, *ai_rval;
    int rval, fd, i;

    parent = 0;
    srand((unsigned int)time(NULL) ^ getpid());

    // Load list of multicast interfaces
    // (every non-loopback entry from the system interface list ifl)
    if (interface_count == 0) {
        for (i = 0; i < ifl_len; i++) {
            if (!ifl[i].isloopback) {
                m_interface[interface_count++] = ifl[i];
            }
        }
    }
    // No non-loopback interfaces, so just use the hostname's interface
    if (interface_count == 0) {
        gethostname(hostname, sizeof(hostname));
        // POSIX does not guarantee NUL termination when the hostname is
        // truncated; terminate explicitly before handing it to getaddrinfo.
        hostname[sizeof(hostname) - 1] = '\0';
        memset(&ai_hints, 0, sizeof(ai_hints));
        ai_hints.ai_family = AF_UNSPEC;
        ai_hints.ai_socktype = SOCK_DGRAM;
        ai_hints.ai_protocol = 0;
        ai_hints.ai_flags = 0;
        if ((rval = getaddrinfo(hostname, NULL, &ai_hints, &ai_rval)) != 0) {
            fprintf(stderr, "Can't get address of hostname %s: %s\n",
                    hostname, gai_strerror(rval));
            exit(ERR_PARAM);
        }
        memcpy(&m_interface[interface_count].su, ai_rval->ai_addr,
                ai_rval->ai_addrlen);
        m_interface[interface_count].ismulti = 1;
        m_interface[interface_count++].isloopback = 0;
        freeaddrinfo(ai_rval);
    }
    // Default uid: last 4 bytes of an IPv6 address, or the whole IPv4 address
    if (!uid) {
        if (m_interface[0].su.ss.ss_family == AF_INET6) {
            uid = m_interface[0].su.sin6.sin6_addr.s6_addr[12] << 24;
            uid |= m_interface[0].su.sin6.sin6_addr.s6_addr[13] << 16;
            uid |= m_interface[0].su.sin6.sin6_addr.s6_addr[14] << 8;
            uid |= m_interface[0].su.sin6.sin6_addr.s6_addr[15];
        } else {
            uid = m_interface[0].su.sin.sin_addr.s_addr;
        }
    }

    // Check validity of dest, backup, and temp directories
    for (i = 0; i < destdircnt; i++) {
        if (!isfullpath(destdir[i])) {
            fprintf(stderr, "ERROR: must specify absolute pathname "
                            "for dest directory\n");
            exit(ERR_PARAM);
        }
        // Probe writability by creating and removing a scratch file
        snprintf(tempf1, sizeof(tempf1)-1, "%s%c_uftptmp1",destdir[i],PATH_SEP);
        tempf1[sizeof(tempf1)-1] = '\x0';
        if ((fd = open(tempf1, O_WRONLY | O_CREAT, 0644)) < 0) {
            perror("couldn't write to dest directory");
            exit(ERR_PARAM);
        }
        close(fd);
        unlink(tempf1);
        if (backupcnt > 0) {
            // backupcnt and destdircnt are always equal
            if (!strcmp(backupdir[i], destdir[i])) {
                fprintf(stderr, "ERROR: corresponding backup dir and dest dir "
                                "must be different\n");
                exit(ERR_PARAM);
            }
            if (!isfullpath(backupdir[i])) {
                fprintf(stderr, "ERROR: must specify absolute pathname "
                                "for backup directory\n");
                exit(ERR_PARAM);
            }
            // Backup dir must allow rename()-style moves from the dest dir
            if (!dirs_movable(destdir[i], backupdir[i])) {
                exit(ERR_PARAM);
            }
        }
    }
    // A temp directory is only valid with a single dest dir and no backups
    if (strcmp(tempdir, "")) {
        if (destdircnt > 1) {
            fprintf(stderr, "ERROR: Cannot use a temp directory "
                            "with multiple dest directories\n");
            exit(ERR_PARAM);
        }
        if (backupcnt > 0) {
            fprintf(stderr, "ERROR: Cannot use a temp directory "
                            "with a backup directory\n");
            exit(ERR_PARAM);
        }
        if (!strcmp(tempdir, destdir[0])) {
            fprintf(stderr, "ERROR: temp dir and dest dir must be different\n");
            exit(ERR_PARAM);
        }
        if (!isfullpath(tempdir)) {
            fprintf(stderr, "ERROR: must specify absolute pathname "
                            "for temp directory\n");
            exit(ERR_PARAM);
        }
        if (!dirs_movable(tempdir, destdir[0])) {
            exit(ERR_PARAM);
        }
    }
    if (strcmp(postreceive, "")) {
        if (!isfullpath(postreceive)) {
            fprintf(stderr, "ERROR: must specify absolute pathname "
                            "for postreceive script\n");
            exit(ERR_PARAM);
        }
    }

    // Default public announce address when none was given on the command line
    if (!pub_multi_count) {
        memset(&ai_hints, 0, sizeof(ai_hints));
        ai_hints.ai_family = AF_UNSPEC;
        ai_hints.ai_socktype = SOCK_DGRAM;
        ai_hints.ai_protocol = 0;
        ai_hints.ai_flags = AI_NUMERICHOST;
        if ((rval = getaddrinfo(DEF_PUB_MULTI, NULL, &ai_hints,
                                &ai_rval)) != 0) {
            fprintf(stderr, "Can't get address of default public address: %s\n",
                    gai_strerror(rval));
            exit(ERR_PARAM);
        }
        memcpy(&pub_multi[0], ai_rval->ai_addr, ai_rval->ai_addrlen);
        freeaddrinfo(ai_rval);
        pub_multi_count = 1;
    }
    // A zeroed entry marks a group-list slot as free
    for (i = 0; i < MAXLIST; i++) {
        memset(&group_list[i], 0, sizeof(struct group_list_t));
    }
    next_hb_time.tv_sec = 0;
    next_hb_time.tv_usec = 0;
    next_keyreq_time.tv_sec = 0;
    next_keyreq_time.tv_usec = 0;
    // Bring the daemon up: cleanup handler, crypto, sockets, background
    atexit(cleanup);
    key_init();
    create_sockets();
    daemonize();
    showtime = 1;
    // Status output: "@LOG" routes to the application log; any other
    // non-empty name is opened for append.
    if (!strcmp(statusfilename, "@LOG")) {
        status_file = applog;
    } else if (strcmp(statusfilename, "")) {
        if ((status_file = fopen(statusfilename, "at")) == NULL) {
            perror("Can't open status file");
            exit(ERR_PARAM);
        }
    }
}