static inline int init_proper_exit(void)
{
    struct sigaction action = {
        .sa_handler = sig_handler,
    };
    sigemptyset(&action.sa_mask);

    atexit(free_resources);

    if (sigaction(SIGINT, &action, NULL) || sigaction(SIGTERM, &action, NULL))
        ERR_RET(-1, "could not install signal handlers");

    return 0;
}
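These snippets come from different codebases, so ERR_RET is not one macro: here it logs a message and returns the given value, elsewhere it wraps a call and propagates a negative result, and in some snippets it only logs. For the log-and-return form used just above, a minimal sketch might look like the following (the definition is an assumption, not any project's actual macro):

#include <stdio.h>

/* Hypothetical ERR_RET: log a formatted message to stderr and return `val`
 * from the enclosing function. The real macro in each project may differ. */
#define ERR_RET(val, fmt, ...)                                  \
    do {                                                        \
        fprintf(stderr, "ERROR: " fmt "\n", ##__VA_ARGS__);     \
        return (val);                                           \
    } while (0)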
int load_cfg_http_svr(json_t *root, const char *key, http_svr_cfg *cfg)
{
    json_t *node = json_object_get(root, key);
    if (!node || !json_is_object(node))
        return -__LINE__;

    json_t *bind = json_object_get(node, "bind");
    if (!bind)
        return -__LINE__;
    if (json_is_string(bind)) {
        cfg->bind_count = 1;
        cfg->bind_arr = malloc(sizeof(nw_svr_bind));
        if (nw_sock_cfg_parse(json_string_value(bind), &cfg->bind_arr[0].addr, &cfg->bind_arr[0].sock_type) < 0)
            return -__LINE__;
    } else if (json_is_array(bind)) {
        cfg->bind_count = json_array_size(bind);
        if (cfg->bind_count == 0)
            return -__LINE__;
        cfg->bind_arr = malloc(sizeof(nw_svr_bind) * cfg->bind_count);
        for (uint32_t i = 0; i < cfg->bind_count; ++i) {
            json_t *row = json_array_get(bind, i);
            if (!json_is_string(row))
                return -__LINE__;
            if (nw_sock_cfg_parse(json_string_value(row), &cfg->bind_arr[i].addr, &cfg->bind_arr[i].sock_type) < 0)
                return -__LINE__;
        }
    } else {
        return -__LINE__;
    }

    ERR_RET(read_cfg_uint32(node, "max_pkg_size", &cfg->max_pkg_size, true, 0));
    ERR_RET(read_cfg_uint32(node, "buf_limit", &cfg->buf_limit, false, 0));
    ERR_RET(read_cfg_uint32(node, "read_mem", &cfg->read_mem, false, 0));
    ERR_RET(read_cfg_uint32(node, "write_mem", &cfg->write_mem, false, 0));
    ERR_RET(read_cfg_int(node, "keep_alive", &cfg->keep_alive, false, 3600));

    return 0;
}
static int init(int argc, char **argv, struct animation *banner)
{
    int i;
    struct string_list *filenames = NULL, *filenames_tail = NULL;
    int filenames_count = 0;

    init_log();

    i = get_options(argc, argv);
    if (i < 0)
        return usage(argv[0], NULL);

    for ( ; i < argc; ++i) {
        if (banner->interval == (unsigned int)-1)
            if (!parse_interval(argv[i], &banner->interval))
                continue;

        filenames_tail = string_list_add(&filenames, filenames_tail, argv[i]);
        if (!filenames_tail)
            return 1;
        else
            filenames_count++;
    }

    if (!filenames_count)
        return usage(argv[0], "No filenames specified");

    if (fb_init(&_Fb))
        return 1;
    if (init_proper_exit())
        return 1;
    if (animation_init(filenames, filenames_count, &_Fb, banner))
        return 1;

    string_list_destroy(filenames);

    if (banner->frame_count == 1 && RunCount == 1)
        banner->interval = 0; /* Single frame, exit after showing it */
    else if (banner->interval == (unsigned int)-1)
        banner->interval = 1000 / 24; /* 24fps */

    if (!Interactive && daemonify())
        ERR_RET(1, "could not create a daemon");

    return 0;
}
int tcp_stream_get_data(tcp_stream_t *t, u_int8_t *buf, size_t size, int dir)
{
    int tot;
    tcp_pktq_t *q;

    if (unlikely(!t || !buf || size < 1))
        ERR_RET(-1, "invalid argument\n");

    q = &t->pktqs[dir % 2];
    tot = q->len > size ? size : q->len;
    if (tot == 0)
        return 0;

    return _tcp_pktq_get_data(q, buf, tot);
}
int load_cfg_inetv4_list(json_t *root, const char *key, inetv4_list *cfg)
{
    json_t *node = json_object_get(root, key);
    if (!node || !json_is_array(node))
        return -__LINE__;

    cfg->count = json_array_size(node);
    cfg->arr = malloc(sizeof(struct sockaddr_in) * cfg->count);
    for (size_t i = 0; i < cfg->count; ++i) {
        json_t *row = json_array_get(node, i);
        if (!json_is_string(row)) {
            return -__LINE__;
        }
        ERR_RET(parse_inetv4_addr(json_string_value(row), &cfg->arr[i]));
    }

    return 0;
}
int load_cfg_log(json_t *root, const char *key, log_cfg *cfg)
{
    json_t *node = json_object_get(root, key);
    if (!node || !json_is_object(node))
        return -__LINE__;

    ERR_RET(read_cfg_str(node, "path", &cfg->path, NULL));
    ERR_RET(read_cfg_str(node, "flag", &cfg->flag, NULL));

    cfg->shift = 0;
    char *shift;
    ERR_RET(read_cfg_str(node, "shift", &shift, "size"));
    strtolower(shift);
    if (strcmp(shift, "size") == 0) {
        cfg->shift |= DLOG_SHIFT_BY_SIZE;
    } else if (strcmp(shift, "hour") == 0) {
        cfg->shift |= DLOG_SHIFT_BY_HOUR;
    } else if (strcmp(shift, "min") == 0) {
        cfg->shift |= DLOG_SHIFT_BY_MIN;
    } else {
        cfg->shift |= DLOG_SHIFT_BY_DAY;
    }

    bool is_pid;
    bool is_fork;
    ERR_RET(read_cfg_bool(node, "pid", &is_pid, false, true));
    ERR_RET(read_cfg_bool(node, "fork", &is_fork, false, true));
    if (is_pid) {
        cfg->shift |= DLOG_LOG_PID;
    }
    if (is_fork) {
        cfg->shift |= DLOG_USE_FORK;
    }

    ERR_RET(read_cfg_int(node, "max", &cfg->max, false, 100 * 1000 * 1000));
    ERR_RET(read_cfg_int(node, "num", &cfg->num, false, 100));
    ERR_RET(read_cfg_int(node, "keep", &cfg->keep, false, 7));

    return 0;
}
int load_cfg_mysql(json_t *root, const char *key, mysql_cfg *cfg)
{
    json_t *node = json_object_get(root, key);
    if (!node || !json_is_object(node))
        return -__LINE__;

    ERR_RET(read_cfg_str(node, "host", &cfg->host, NULL));
    ERR_RET(read_cfg_int(node, "port", &cfg->port, false, 3306));
    ERR_RET(read_cfg_str(node, "user", &cfg->user, NULL));
    ERR_RET(read_cfg_str(node, "pass", &cfg->pass, NULL));
    ERR_RET(read_cfg_str(node, "name", &cfg->name, NULL));
    ERR_RET(read_cfg_str(node, "charset", &cfg->charset, "utf8"));

    return 0;
}
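For context, these loaders take a `root` object produced by a jansson parse of the process configuration file. A minimal driver sketch, assuming a file name of config.json and a top-level key of "db" (both are assumptions, and the error reporting here is illustrative only):

#include <jansson.h>
#include <stdio.h>

/* Hypothetical caller: parse config.json and fill a mysql_cfg via the
 * loader above. Requires the project's headers for mysql_cfg and
 * load_cfg_mysql. */
int load_mysql_settings(mysql_cfg *cfg)
{
    json_error_t error;
    json_t *root = json_load_file("config.json", 0, &error);
    if (root == NULL) {
        fprintf(stderr, "config parse error on line %d: %s\n", error.line, error.text);
        return -1;
    }

    int ret = load_cfg_mysql(root, "db", cfg);
    if (ret < 0)
        fprintf(stderr, "load_cfg_mysql failed: %d (negated __LINE__ of the failing check)\n", ret);

    json_decref(root);
    return ret;
}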
WTDB_ERR DbSessionManager::acquire_session(DbSession **session, bool pooled, unsigned long maxWait)
{
    JET_ERR rc;

    if (!pooled) {
        ERR_RET( _engine->create_session(session) )
    } else {
        rc = WaitForSingleObject(_sema, maxWait);
        if (rc == WAIT_TIMEOUT) {
            throw wyki_resource_timeout_error(L"Timed out waiting to acquire rights to create new database session");
        }
        ERR_RET( _engine->create_session(session) )
    }

    return JET_errSuccess;
}
objpool_t *
objpool_alloc(size_t objsize, int incsize, int locked)
{
    objpool_t *op = NULL;
    pthread_mutexattr_t attr;

    if (objsize < 1)
        return NULL;
    if (incsize < 1)
        incsize = OBJPOOL_INC_SIZE;

    op = malloc(sizeof(objpool_t));
    if (!op)
        ERR_RET(NULL, "malloc objpool failed: %s\n", ERRSTR);
    memset(op, 0, sizeof(objpool_t));

    /* using create time as magic number, so it's unique */
    op->magic = time(NULL);
    op->objsize = align_num(objsize);
    op->incsize = incsize;
    op->nodesize = op->objsize + align_num(sizeof(_op_node_t));

    if (_op_alloc_cache(op)) {
        free(op);
        return NULL;
    }

    /* initiate pthread lock for thread-safe, mutex is error-check type */
    if (locked) {
        op->need_lock = 1;
        pthread_mutexattr_init(&attr);
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK_NP);
        pthread_mutex_init(&op->lock, &attr);
        pthread_mutexattr_destroy(&attr);
    }

    return op;
}
int load_cfg_rpc_clt(json_t *root, const char *key, rpc_clt_cfg *cfg)
{
    json_t *node = json_object_get(root, key);
    if (!node || !json_is_object(node))
        return -__LINE__;

    ERR_RET(read_cfg_str(node, "name", &cfg->name, NULL));

    json_t *addr = json_object_get(node, "addr");
    if (json_is_string(addr)) {
        cfg->addr_count = 1;
        cfg->addr_arr = malloc(sizeof(nw_addr_t));
        if (nw_sock_cfg_parse(json_string_value(addr), &cfg->addr_arr[0], &cfg->sock_type) < 0)
            return -__LINE__;
    } else if (json_is_array(addr)) {
        cfg->addr_count = json_array_size(addr);
        if (cfg->addr_count == 0)
            return -__LINE__;
        cfg->addr_arr = malloc(sizeof(nw_addr_t) * cfg->addr_count);
        for (uint32_t i = 0; i < cfg->addr_count; ++i) {
            json_t *row = json_array_get(addr, i);
            if (!json_is_string(row))
                return -__LINE__;
            if (nw_sock_cfg_parse(json_string_value(row), &cfg->addr_arr[i], &cfg->sock_type) < 0)
                return -__LINE__;
        }
    } else {
        return -__LINE__;
    }

    ERR_RET(read_cfg_uint32(node, "max_pkg_size", &cfg->max_pkg_size, true, 0));
    ERR_RET(read_cfg_uint32(node, "buf_limit", &cfg->buf_limit, false, 0));
    ERR_RET(read_cfg_uint32(node, "read_mem", &cfg->read_mem, false, 0));
    ERR_RET(read_cfg_uint32(node, "write_mem", &cfg->write_mem, false, 0));
    ERR_RET(read_cfg_real(node, "reconnect_timeout", &cfg->reconnect_timeout, false, 0));
    ERR_RET(read_cfg_real(node, "heartbeat_timeout", &cfg->heartbeat_timeout, false, 0));

    return 0;
}
static void handle_su_serv_recv(fe_t *fe)
{
    int ret;
    char ipbuff[INET6_ADDRSTRLEN];
    int port;
    SAUN saddr;
    socklen_t socklen;
    su_serv_t *psvr = container_of(fe, su_serv_t, fe);
    struct iovec iovrecv[2] = {{0}};    /* assumed init to 0 */
    struct msghdr msgrecv = {0};        /* assumed init to 0 */
    frames_t *frame;

recvagain:
    socklen = sizeof(SA6);
    frame = calloc(1, sizeof(frames_t) + REALDATAMAX);
    if (frame == 0) {
        errno = ENOBUFS; // ENOMEM
        err_ret("serv %x ENOBUFS", psvr);
        /* reject datagram */
        ret = recvfrom(fe->fd, rejectbuff, sizeof(rejectbuff), 0, (SA*)&saddr, &socklen);
        if (ret < 0 && errno == EAGAIN) {
            return;
        }
#ifdef SU_DEBUG_PEER_RECV
        switch (saddr.sfamily) {
            case PF_INET:
            case PF_INET6:
#ifdef SU_DEBUG_IP6FULL
                su_get_ip_port_f(&saddr, ipbuff, sizeof(ipbuff), &port);
#else
                su_get_ip_port(&saddr, ipbuff, sizeof(ipbuff), &port);
#endif
                break;
            default:
                log_msg("serv %x reject unknown protocol raw bytes %d", psvr, ret);
                goto recvagain;
        };
        ERR_RET("serv %x %d recv %s:%d bytes %d, but reject datas",
                psvr, fe->fd, ipbuff, port, ret);
#endif
        return;
    }

    frame->srclen = psvr->servlen;

    msgrecv.msg_name = &frame->srcaddr;
    msgrecv.msg_namelen = frame->srclen;
    msgrecv.msg_iov = iovrecv;
    msgrecv.msg_iovlen = 2;

    iovrecv[0].iov_base = &frame->recvhdr;
    iovrecv[0].iov_len = sizeof(suhdr_t);
    iovrecv[1].iov_base = frame->data;
    iovrecv[1].iov_len = REALDATAMAX;

    if ((ret = recvmsg(fe->fd, &msgrecv, 0)) < 0) {
        if (ret < 0 && errno == EAGAIN) {
            free(frame);
            return;
        }
        ERR_RET("recvmsg error");
    }

    switch (frame->srcaddr.sfamily) {
        case PF_INET:
        case PF_INET6:
#ifdef SU_DEBUG_IP6FULL
            su_get_ip_port_f(&frame->srcaddr, ipbuff, sizeof(ipbuff), &port);
#else
            su_get_ip_port(&frame->srcaddr, ipbuff, sizeof(ipbuff), &port);
#endif
            break;
        default:
            log_msg("serv %x reject unknown protocol raw bytes %d", psvr, ret);
            free(frame);
            goto recvagain;
    };

    if (ret < sizeof(suhdr_t)) {
#ifdef SU_DEBUG_PEER_RECV
        errno = EBADMSG;
        err_ret("serv %x recv %s:%d raw bytes %d less-then head %d bytes",
                psvr, ipbuff, port, ret, sizeof(suhdr_t));
#endif
        free(frame);
        goto recvagain;
    }

#ifdef SU_DEBUG_PEER_RECV
    log_msg("serv %x recv %s:%d raw bytes %d", psvr, ipbuff, port, ret);
#endif

    suhdr_t *r = &frame->recvhdr;
    uint8_t act = r->act;
    uint8_t type = r->type;

    frame->len = ret - sizeof(suhdr_t);

    pthread_mutex_lock(&psvr->lock);
    if (act == SU_SYN && frame->len > 0) {
        if (!psvr->run) {
            pthread_mutex_unlock(&psvr->lock);
            free(frame);
            goto recvagain;
        }
#if defined SU_DEBUG_PEER_RECV || defined SU_DEBUG_LIST
        log_msg("serv %x append syn "ColorRed"%p"ColorEnd" seq %d datagram len %d",
                psvr, frame, r->seq, frame->len);
#endif
        list_append(&psvr->synrecvls, &frame->node);
        pthread_cond_broadcast(&psvr->syncond);
    } else if (act == SU_ACK && type == SU_RELIABLE) {
        if (psvr->ackwaitnum <= 0) {
            pthread_mutex_unlock(&psvr->lock);
            free(frame);
            goto recvagain;
        }
#if defined SU_DEBUG_PEER_RECV || defined SU_DEBUG_LIST
        log_msg("serv %x append ack "ColorRed"%p"ColorEnd" seq %d datagram len %d",
                psvr, frame, r->seq, frame->len);
#endif
        list_append(&psvr->ackrecvls, &frame->node);
        pthread_cond_broadcast(&psvr->ackcond);
    } else {
        pthread_mutex_unlock(&psvr->lock);
#ifdef SU_DEBUG_PEER_RECV
        errno = EPROTO;
        err_ret("serv %x recv %s:%d raw bytes %d", psvr, ipbuff, port, ret);
#endif
        free(frame);
        return;
    }
    pthread_mutex_unlock(&psvr->lock);

    goto recvagain;
}
SXSocketRef SXCreateServerSocket(unsigned short port, int domain, int type, int protocol, SXError *err_ret)
{
    SXSocketRef sockPtr = (SXSocketRef)sx_calloc(1, sizeof(sx_socket_t));
    if (sockPtr == NULL)
        ERR_RET(SX_ERROR_MEM_ALLOC);

    sockPtr->domain = domain;
    sockPtr->type = type;
    sockPtr->protocol = protocol;
    sockPtr->ref_count = 1;
    sockPtr->port = port;

    socklen_t addrlen;
    memset(&sockPtr->addr, 0, sizeof(struct sockaddr_storage));

    if ((sockPtr->sockfd = socket(domain, type, protocol)) == -1) {
        perror("socket");
        ERR_RET(SX_ERROR_SYS_SOCKET);
    }

    switch (domain) {
        case AF_INET:
            sockaddr_in(sockPtr->addr).sin_addr.s_addr = INADDR_ANY;
            sockaddr_in(sockPtr->addr).sin_port = htons(port);
            sockaddr_in(sockPtr->addr).sin_len = sizeof(struct sockaddr_in);
            addrlen = sizeof(struct sockaddr_in);
            break;
        case AF_INET6:
            sockaddr_in6(sockPtr->addr).sin6_addr = in6addr_any;
            sockaddr_in6(sockPtr->addr).sin6_port = htons(port);
            sockaddr_in6(sockPtr->addr).sin6_len = sizeof(struct sockaddr_in6);
            addrlen = sizeof(struct sockaddr_in6);
            break;
        default:
            ERR_RET(SX_ERROR_INVALID_IPADDR);
            return NULL;
    }
    sockPtr->addr.ss_family = domain;

    int yes = 1;
    if (setsockopt(sockPtr->sockfd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(int)) == -1) {
        perror("setsockopt");
        ERR_RET(SX_ERROR_SYS_SETSOCKOPT);
    }

    if (bind(sockPtr->sockfd, (struct sockaddr *)&sockPtr->addr, addrlen) == -1) {
        perror("bind");
        ERR_RET(SX_ERROR_SYS_BIND);
    }

    ERR_RET(SX_SUCCESS);
    return sockPtr;
}
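The sockaddr_in()/sockaddr_in6() accessors used above are not shown in this snippet; presumably they reinterpret the embedded struct sockaddr_storage as the protocol-specific form (the sin_len/sin6_len assignments also imply a BSD-style sockaddr). A sketch of what such helpers could look like, offered only as an assumption about code that lives elsewhere in that project:

#include <netinet/in.h>

/* Assumed accessors: view a struct sockaddr_storage lvalue as the IPv4 or
 * IPv6 sockaddr. The real project may define these differently. */
#define sockaddr_in(ss)  (*(struct sockaddr_in *)&(ss))
#define sockaddr_in6(ss) (*(struct sockaddr_in6 *)&(ss))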
/**
 * proposer_ack_redirect - Resolve an acceptor's claim that we are not the
 * true proposer.
 *
 * If we send a prepare to an acceptor who does not believe us to be the
 * true proposer, the acceptor will respond with a redirect. Since the
 * correctness of Paxos guarantees that the acceptor list has a consistent
 * total ordering, receiving a redirect means that there is someone more
 * fitting to be proposer who we have lost contact with.
 *
 * Note that this does not necessarily mean that the identified proposer is
 * still live; it is possible that we noticed a proposer failure and then
 * prepared before the acceptor who sent the redirect detected the failure.
 * To avoid this as much as possible, we wait for a majority of redirects
 * before accepting defeat and attempting reconnection to our superior. If
 * we "win" with a majority completing the prepare, then we drop the former
 * proposer regardless of whether he has some connections still open.
 */
int proposer_ack_redirect(struct paxos_header *hdr, msgpack_object *o)
{
    int r;
    struct paxos_header orig_hdr;
    struct paxos_acceptor *acc;
    struct paxos_continuation *k;

    // We dispatched as the proposer, so we do not need to check again whether
    // we think ourselves to be the proposer. Instead, just sanity check that
    // the supposed true proposer has a lower ID than we do. This should
    // always be the case because of the consistency of proposer ranks.
    assert(hdr->ph_inum < pax->self_id);

    // If we are not still preparing, either we succeeded or our prepare was
    // rejected. In the former case, we should ignore the redirect because
    // we have affirmed our proposership with a majority vote. In the latter
    // case, if we connected to the true proposer, we would have dispatched
    // as an acceptor; and if we did not successfully connect, we would have
    // sent out another prepare. Hence, if we are not preparing, our prepare
    // succeeded and hence we should ignore the redirect.
    if (pax->prep == NULL) {
        return 0;
    }

    // Ensure that the redirect is for our current prepare; otherwise ignore.
    paxos_header_unpack(&orig_hdr, o);
    if (ballot_compare(orig_hdr.ph_ballot, pax->prep->pp_ballot) != 0) {
        return 0;
    }

    // Acknowledge the rejection of our prepare.
    pax->prep->pp_redirects++;

    // If we have been redirected by a majority, attempt reconnection. If a
    // majority redirects, our prepare will never succeed, but we defer freeing
    // it until reconnection occurs. This provides us with the guarantee (used
    // above) that an acceptor who identifies as the proposer and whose prepare
    // is non-NULL has either successfully prepared or has not yet begun its
    // prepare cycle.
    if (DEATH_ADJUSTED(pax->prep->pp_redirects) >= majority()) {
        // Connect to the higher-ranked acceptor indicated in the most recent
        // redirect message we received (i.e., this one). It's possible that an
        // even higher-ranked acceptor exists, but we'll find that out when we
        // try to send a request.
        acc = acceptor_find(&pax->alist, hdr->ph_inum);
        assert(acc->pa_peer == NULL);

        // Defer computation until the client performs connection. If it succeeds,
        // give up the prepare; otherwise, reprepare.
        k = continuation_new(continue_ack_redirect, acc->pa_paxid);
        ERR_RET(r, state.connect(acc->pa_desc, acc->pa_size, &k->pk_cb));
        return 0;
    }

    // If we have heard back from everyone but the acks and redirects are tied,
    // just prepare again.
    if (pax->prep->pp_acks < majority() &&
        DEATH_ADJUSTED(pax->prep->pp_redirects) < majority() &&
        pax->prep->pp_acks + pax->prep->pp_redirects == pax->live_count) {
        g_free(pax->prep);
        pax->prep = NULL;
        return proposer_prepare(NULL);
    }

    return 0;
}
/**
 * paxos_commit - Commit a value for an instance of the Paxos protocol.
 *
 * We totally order calls to paxos_learn by instance number in order to make
 * the join and greet protocols behave properly. This also gives our chat
 * clients an easy mechanism for totally ordering their logs without extra
 * work on their part.
 *
 * It is possible that failed DEC_PART decrees (i.e., decrees in which the
 * proposer attempts to disconnect an acceptor who a majority of acceptors
 * believe is still alive) could delay the learning of committed chat
 * messages. To avoid this, once a proposer receives enough rejections
 * of the decree, the part decree is replaced with a null decree. The
 * proposer can then issue the part again with a higher instance number
 * if desired.
 */
int paxos_commit(struct paxos_instance *inst)
{
    int r;
    struct paxos_request *req = NULL;
    struct paxos_instance *it;

    // Mark the commit.
    inst->pi_committed = true;

    // Pull the request from the request cache if applicable.
    if (request_needs_cached(inst->pi_val.pv_dkind)) {
        req = request_find(&pax->rcache, inst->pi_val.pv_reqid);

        // If we can't find a request and need one, send out a retrieve to the
        // request originator and defer the commit.
        if (req == NULL) {
            return paxos_retrieve(inst);
        }
    }

    // Mark the cache.
    inst->pi_cached = true;

    // We should already have committed and learned everything before the hole.
    assert(inst->pi_hdr.ph_inum >= pax->ihole);

    // Since we want our learns to be totally ordered, if we didn't just fill
    // the hole, we cannot learn.
    if (inst->pi_hdr.ph_inum != pax->ihole) {
        // If we're the proposer, we have to just wait it out.
        if (is_proposer()) {
            return 0;
        }

        // If the hole has committed but is just waiting on a retrieve, we'll learn
        // when we receive the resend.
        if (pax->istart->pi_hdr.ph_inum == pax->ihole && pax->istart->pi_committed) {
            assert(!pax->istart->pi_cached);
            return 0;
        }

        // The hole is either missing or uncommitted and we are not the proposer,
        // so issue a retry.
        return acceptor_retry(pax->ihole);
    }

    // Set pax->istart to point to the instance numbered pax->ihole.
    if (pax->istart->pi_hdr.ph_inum != pax->ihole) {
        pax->istart = LIST_NEXT(pax->istart, pi_le);
    }
    assert(pax->istart->pi_hdr.ph_inum == pax->ihole);

    // Now learn as many contiguous commits as we can. This function is the
    // only path by which we learn commits, and we always learn in contiguous
    // blocks. Therefore, it is an invariant of our system that all the
    // instances numbered lower than pax->ihole are learned and committed, and
    // none of the instances geq to pax->ihole are learned (although some may
    // be committed).
    //
    // We iterate over the instance list, detecting and breaking if we find a
    // hole and learning whenever we don't.
    for (it = pax->istart; ; it = LIST_NEXT(it, pi_le), ++pax->ihole) {
        // If we reached the end of the list, set pax->istart to the last existing
        // instance.
        if (it == (void *)&pax->ilist) {
            pax->istart = LIST_LAST(&pax->ilist);
            break;
        }

        // If we skipped over an instance number because we were missing an
        // instance, set pax->istart to the last instance before the hole.
        if (it->pi_hdr.ph_inum != pax->ihole) {
            pax->istart = LIST_PREV(it, pi_le);
            break;
        }

        // If we found an uncommitted or uncached instance, set pax->istart to it.
        if (!it->pi_committed || !it->pi_cached) {
            pax->istart = it;
            break;
        }

        // By our invariant, since we are past our original hole, no instance
        // should be learned.
        assert(!it->pi_learned);

        // Grab its associated request. This is guaranteed to exist because we
        // have checked that pi_cached holds.
        req = NULL;
        if (request_needs_cached(it->pi_val.pv_dkind)) {
            req = request_find(&pax->rcache, it->pi_val.pv_reqid);
            assert(req != NULL);
        }

        // Learn the value.
        ERR_RET(r, paxos_learn(it, req));
    }

    return 0;
}
int tcp_stream_flow(tcp_stream_t *t, const netpkt_t *pkt, int dir)
{
    int ret = 0;    /* default return when no payload is queued */
    struct tcphdr *h;

    if (unlikely(!t || !pkt))
        ERR_RET(-1, "invalid argument\n");

    if (unlikely(!pkt->hdr4_tcp))
        ERR_RET(-1, "not decode TCP header\n");

    h = pkt->hdr4_tcp;

    /* check seq/ack number */
    if (_tcp_check_seq(t, h, dir))
        ERR_RET(-1, "invalid seq number\n");

    /* RST packet */
    if (h->rst) {
        printf("<%d> RST: seq %u, ack %u\n", dir, ntohl(h->seq), ntohl(h->ack));
        return -1;
    }

    printf("<%d>stream(%p) before state: %d\n", dir, t, t->state);

    /* TCP state */
    switch (t->state) {
        case TCP_ST_SYN:
            /* server syn-ack packet */
            if (dir == 1 && h->syn && h->ack)
                t->state = TCP_ST_SYN_ACK;
            break;

        case TCP_ST_SYN_ACK:
            /* client ACK, establish connection */
            if (dir == 0 && h->ack)
                t->state = TCP_ST_EST;
            break;

        case TCP_ST_EST:
            /* receive Fin */
            if (h->fin)
                t->state = TCP_ST_FIN1;
            if (h->psh) {
                ret = _tcp_pktq_add(&t->pktqs[dir], pkt);
            }
            break;

        case TCP_ST_FIN1:
            if (h->ack)
                t->state = TCP_ST_FIN1_ACK;
            if (h->fin)
                t->state = TCP_ST_FIN2;
            if (h->psh) {
                ret = _tcp_pktq_add(&t->pktqs[dir], pkt);
            }
            break;

        case TCP_ST_FIN1_ACK:
            if (h->fin)
                t->state = TCP_ST_FIN2;
            break;

        case TCP_ST_FIN2:
            if (h->ack) {
                t->state = TCP_ST_CLOSE;
                return 0;
            }
            break;

        default:
            break;
    }

    printf("<%d>stream(%p) after state: %d\n", dir, t, t->state);

    return ret;
}
static void *thread_request_handle(void *v)
{
    int ret;
    struct list *synnode;
    struct timespec abstime = {0};
    frames_t *frame;

    su_serv_t *psvr = (su_serv_t*)v;
    pthread_t tid __attribute__((unused)) = pthread_self();

    for (; psvr->run;) {
        pthread_mutex_lock(&psvr->lock);
        while ((synnode = psvr->synrecvls.next) == &psvr->synrecvls) {
            maketimeout_seconds(&abstime, 1);
            ret = pthread_cond_timedwait(&psvr->syncond, &psvr->lock, &abstime);
            if (!psvr->run) {
                pthread_mutex_unlock(&psvr->lock);
                goto quit;
            }
            if (ret == ETIMEDOUT) {
                pthread_mutex_lock(&psvr->cachelock);
                reliable_ack_unsave(psvr);
                pthread_mutex_unlock(&psvr->cachelock);
            }
        }
        list_remove(synnode);
        pthread_mutex_unlock(&psvr->lock);

        /* have request datagram */
        frame = container_of(synnode, frames_t, node);

        rb_key_cache_t key;
        memcpy(&key.destaddr, &frame->srcaddr, sizeof(SAUN));
        key.destlen = frame->srclen;
        key.seq = frame->recvhdr.seq;
        key.sid = frame->recvhdr.sid;

        struct rb_node *cachenode;
        cache_t *cache;

        pthread_mutex_lock(&psvr->cachelock);
        reliable_ack_unsave(psvr);

        if (frame->recvhdr.type == SU_RELIABLE) {
            if ((cachenode = rb_search(&psvr->rbackcache, &key))) {
                cache = rb_entry(cachenode, cache_t, rbn);

                if (cache->frame.len == -1) {
#ifdef SU_DEBUG_RBTREE
                    char ipbuff[INET6_ADDRSTRLEN];
                    int port;
                    su_get_ip_port_f(&cache->frame.srcaddr, ipbuff, sizeof(ipbuff), &port);
                    log_msg("serv %x %x time %u key(%s:%d:%u:%u)" ColorRed " 0ACK cache %p" ColorEnd,
                            psvr, tid, cache->ts, ipbuff, port,
                            cache->frame.recvhdr.sid, cache->frame.recvhdr.seq, cache);
#endif
                    pthread_mutex_unlock(&psvr->cachelock);
                    free(frame);
                    continue;
                }
#ifdef SU_DEBUG_RBTREE
                char ipbuff[INET6_ADDRSTRLEN];
                int port;
                su_get_ip_port_f(&cache->frame.srcaddr, ipbuff, sizeof(ipbuff), &port);
                log_msg("serv %x %x time %u key(%s:%d:%u:%u)" ColorRed " @ACK cache %p" ColorEnd,
                        psvr, tid, cache->ts, ipbuff, port,
                        cache->frame.recvhdr.sid, cache->frame.recvhdr.seq, cache);
#endif
                struct iovec iovsend[2] = {{0}};
                struct msghdr msgsend = {0};    /* assumed init to 0 */

                frame->recvhdr.act = SU_ACK;
                msgsend.msg_name = (void*)&cache->frame.srcaddr;
                msgsend.msg_namelen = cache->frame.srclen;
                msgsend.msg_iov = &iovsend[0];
                msgsend.msg_iovlen = 2;

                iovsend[0].iov_base = &frame->recvhdr;
                iovsend[0].iov_len = sizeof(suhdr_t);
                iovsend[1].iov_base = (void*)cache->frame.data; /* get the cache results */
                iovsend[1].iov_len = cache->frame.len;

                /* resend from cache */
                if (sendmsg(psvr->fd, &msgsend, 0) != sizeof(suhdr_t) + cache->frame.len) {
                    char ipbuff[INET6_ADDRSTRLEN];
                    int port;
                    su_get_ip_port_f(&cache->frame.srcaddr, ipbuff, sizeof(ipbuff), &port);
                    ERR_RET("retransmit sendmsg %s:%d:%u:%u:%u error",
                            ipbuff, port, frame->recvhdr.seq, frame->recvhdr.ts, frame->recvhdr.sid);
                }
#ifdef SU_DEBUG_PEER_RESEND
                else {
                    char ipbuff[INET6_ADDRSTRLEN];
                    int port;
                    su_get_ip_port_f(&cache->frame.srcaddr, ipbuff, sizeof(ipbuff), &port);
                    log_msg("retransmit sendmsg %s:%d:%u:%u:%u",
                            ipbuff, port, frame->recvhdr.seq, frame->recvhdr.ts, frame->recvhdr.sid);
                }
#endif
                pthread_mutex_unlock(&psvr->cachelock);
                free(frame);
                continue;
            } else {
                if (reliable_ack___hold(psvr, frame) < 0) {
                    err_ret("reliable_ack___hold error");
                    pthread_mutex_unlock(&psvr->cachelock);
                    free(frame);
                    continue;
                }
            }
        }
        pthread_mutex_unlock(&psvr->cachelock);

        request_handle(psvr, frame);

#if defined SU_DEBUG_PEER_RECV || defined SU_DEBUG_LIST
        log_msg("serv %x %x delete syn "ColorRed"%p"ColorEnd" seq %d datagram len %d",
                psvr, tid, frame, frame->recvhdr.seq, frame->len);
#endif
        free(frame);
    }

quit:
    pthread_exit(0);
}
int loop(int sock, struct sockaddr_nl *addr)
{
    int received_bytes = 0;
    struct nlmsghdr *nlh;
    char destination_address[32];
    char gateway_address[32];
    struct rtmsg *route_entry;          /* This struct represents a route entry
                                           in the routing table */
    struct rtattr *route_attribute;     /* This struct contains route
                                           attributes (route type) */
    int route_attribute_len = 0;
    char buffer[BUFFER_SIZE];

    bzero(destination_address, sizeof(destination_address));
    bzero(gateway_address, sizeof(gateway_address));
    bzero(buffer, sizeof(buffer));

    /* Receiving netlink socket data */
    while (1) {
        received_bytes = recv(sock, buffer, sizeof(buffer), 0);
        if (received_bytes < 0)
            ERR_RET("recv");
        /* cast the received buffer */
        nlh = (struct nlmsghdr *) buffer;
        /* If we received all data ---> break */
        if (nlh->nlmsg_type == NLMSG_DONE)
            break;
        /* We are just interested in routing information */
        if (addr->nl_groups == RTMGRP_IPV4_ROUTE)
            break;
    }

    /* Reading netlink socket data */
    /* Loop through all entries */
    /* For more information on some functions:
     * http://www.kernel.org/doc/man-pages/online/pages/man3/netlink.3.html
     * http://www.kernel.org/doc/man-pages/online/pages/man7/rtnetlink.7.html
     */
    for ( ; NLMSG_OK(nlh, received_bytes); nlh = NLMSG_NEXT(nlh, received_bytes)) {
        /* Get the route data */
        route_entry = (struct rtmsg *) NLMSG_DATA(nlh);

        /* We are just interested in the main routing table */
        if (route_entry->rtm_table != RT_TABLE_MAIN)
            continue;

        /* Get attributes of route_entry */
        route_attribute = (struct rtattr *) RTM_RTA(route_entry);

        /* Get the route attributes len */
        route_attribute_len = RTM_PAYLOAD(nlh);

        /* Loop through all attributes */
        for ( ; RTA_OK(route_attribute, route_attribute_len);
               route_attribute = RTA_NEXT(route_attribute, route_attribute_len)) {
            /* Get the destination address */
            if (route_attribute->rta_type == RTA_DST) {
                inet_ntop(AF_INET, RTA_DATA(route_attribute),
                          destination_address, sizeof(destination_address));
            }
            /* Get the gateway (next hop) */
            if (route_attribute->rta_type == RTA_GATEWAY) {
                inet_ntop(AF_INET, RTA_DATA(route_attribute),
                          gateway_address, sizeof(gateway_address));
            }
        }

        /* Now we can dump the routing attributes */
        if (nlh->nlmsg_type == RTM_DELROUTE)
            fprintf(stdout, "Deleting route to destination --> %s and gateway %s\n",
                    destination_address, gateway_address);
        if (nlh->nlmsg_type == RTM_NEWROUTE)
            printf("Adding route to destination --> %s and gateway %s\n",
                   destination_address, gateway_address);
    }

    return 0;
}
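loop() expects a netlink socket that is already bound to the IPv4 routing multicast group. A minimal setup sketch using the standard rtnetlink API (error handling abbreviated; the forward declaration stands in for the function above):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int loop(int sock, struct sockaddr_nl *addr);   /* defined above */

int main(void)
{
    struct sockaddr_nl addr;

    /* Raw netlink socket speaking the routing protocol. */
    int sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
    if (sock < 0) {
        perror("socket");
        return 1;
    }

    /* Subscribe to IPv4 route add/delete notifications. */
    memset(&addr, 0, sizeof(addr));
    addr.nl_family = AF_NETLINK;
    addr.nl_groups = RTMGRP_IPV4_ROUTE;
    if (bind(sock, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
        perror("bind");
        close(sock);
        return 1;
    }

    /* Process route change notifications as they arrive. */
    while (loop(sock, &addr) == 0)
        ;

    close(sock);
    return 0;
}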
static int su_serv_send_recv_act(su_serv_t *psvr,
        SA *destaddr, socklen_t destlen,
        const void *outbuff, int outbytes,
        void *inbuff, int inbytes, int retransmit)
{
    int n;
    struct iovec iovsend[2] = {{0}};
    struct msghdr msgsend = {0};        /* assumed init to 0 */
    suhdr_t *r, sendhdr = {0};          /* protocol header */
    int ret, waitsec;

    struct list *node = 0;
    frames_t *packet = 0;

    pthread_mutex_lock(&psvr->mutex);
    pthread_mutex_lock(&psvr->lock);

    if (retransmit == 0) {
        psvr->seq++;
        psvr->retransmission = 1;
    } else {
        if (psvr->retransmission == 0) {
            pthread_mutex_unlock(&psvr->mutex);
            pthread_mutex_unlock(&psvr->lock);
            errno = ETIMEDOUT;
            return -1;
        }
        psvr->retransmission--;
    }

    if (psvr->rttinit == 0) {
        rtt_init(&psvr->rttinfo, psvr->retry);  /* first time we're called */
        psvr->rttinit = 1;
    }

    sendhdr.act = SU_SYN;
    sendhdr.type = SU_RELIABLE;
    sendhdr.sid = psvr->sid;
    sendhdr.seq = psvr->seq;

    msgsend.msg_name = (void*)destaddr;
    msgsend.msg_namelen = destlen;
    msgsend.msg_iov = iovsend;
    msgsend.msg_iovlen = 2;

    iovsend[0].iov_base = (void*)&sendhdr;
    iovsend[0].iov_len = sizeof(suhdr_t);
    iovsend[1].iov_base = (void*)outbuff;
    iovsend[1].iov_len = outbytes;

    struct timespec abstime = {0};
    suhdr_t *precvhdr;

    rtt_newpack(&psvr->rttinfo);    /* initialize for this packet */
    psvr->ackwaitnum++;

sendagain:
    sendhdr.ts = rtt_ts(&psvr->rttinfo);
    if (sendmsg(psvr->fd, &msgsend, 0) < 0) {
        ERR_RET("sendmsg error");
        goto error_ret;
    }

    waitsec = rtt_start(&psvr->rttinfo);    /* calc timeout value & start timer */

#ifdef SU_DEBUG_RTT
    fprintf(stderr, ColorRed "send seq %4d: " ColorEnd, sendhdr.seq);
    rtt_debug(&psvr->rttinfo);
#endif

    /* set timed wait time-point */
    maketimeout_seconds(&abstime, waitsec);

#ifdef SU_DEBUG_TIMEVERBOSE
    struct timeval now;
    gettimeofday(&now, 0);
    log_msg(ColorBlue "pthread_cond_timedwait : %u.%u time expire" ColorEnd,
            abstime.tv_sec, abstime.tv_nsec);
    log_msg(ColorBlue "pthread_cond_timedwait : %d.%d now time" ColorEnd,
            now.tv_sec, now.tv_usec*1000);
#endif

timedwaitagain:
    ret = pthread_cond_timedwait(&psvr->ackcond, &psvr->lock, &abstime);
    if (ret == 0) {
#ifdef SU_DEBUG_TIMEVERBOSE
        struct timeval now;
        gettimeofday(&now, 0);
        log_msg(ColorBlue "pthread_cond_timedwait : %d.%d ack cond interrupt" ColorEnd,
                now.tv_sec, now.tv_usec*1000);
#endif
        node = psvr->ackrecvls.next;
        for (; node != &psvr->ackrecvls; node = node->next) {
            packet = container_of(node, frames_t, node);
            r = &packet->recvhdr;
            if (su_cmp_ack_SU_RELIABLE(&sendhdr, r)) {
                break;
            }
        }
        if (node == &psvr->ackrecvls) {
            /* Be careful of the lock, locked -> timedwait -> unlock */
#ifdef SU_DEBUG_LIST
            log_msg("serv %x no found seq %d ack, timed wait again",
                    psvr, sendhdr.seq);
#endif
            goto timedwaitagain;
        }

        /* Find response packet node */
        list_remove(&packet->node);
        n = packet->len;
        precvhdr = &packet->recvhdr;

#if defined SU_DEBUG_PEER_RECV || defined SU_DEBUG_LIST
        log_msg("serv %x finded ack " ColorRed "%p" ColorEnd " seq %d datagram len %d",
                psvr, packet, r->seq, packet->len);
#endif

#ifdef SU_DEBUG_RTT
        fprintf(stderr, ColorRed "recv seq %4d \n" ColorEnd, precvhdr->seq);
#endif

        // SU_RELIABLE received response, copy to user buffer
        memcpy(inbuff, packet->data, n > inbytes ? inbytes : n);

    } else if (ret == EINTR) {
        log_msg("pthread_cond_timedwait system EINTR");
        goto timedwaitagain;
    } else if (ret == ETIMEDOUT) {
#ifdef SU_DEBUG_TIMEVERBOSE
        struct timeval now;
        gettimeofday(&now, 0);
        log_msg(ColorBlue "pthread_cond_timedwait : %u.%u ETIMEOUT have expired" ColorEnd,
                now.tv_sec, now.tv_usec*1000);
#endif
        if (rtt_timeout(&psvr->rttinfo) < 0) {
#ifdef SU_DEBUG_RTT
            err_msg(ColorYel "no response from server, giving up" ColorEnd);
#endif
            psvr->rttinit = 0;  /* reinit in case we're called again */
            errno = ETIMEDOUT;
            goto error_ret;
        }
#ifdef SU_DEBUG_RTT
        err_msg(ColorRed " seq %4d timeout, retransmitting %d" ColorEnd,
                sendhdr.seq, ++retransmit);
#endif
        goto sendagain;
    } else {
        errno = ret;
        ERR_RET(" su_serv_send_recv_act unknown error[%d]", ret);
        goto error_ret;
    }

    /* calculate & store new RTT estimator values */
    rtt_stop(&psvr->rttinfo, rtt_ts(&psvr->rttinfo) - precvhdr->ts);

    if (--psvr->ackwaitnum == 0) {
        su_serv_list_empty(psvr, &psvr->ackrecvls);
    }
    pthread_mutex_unlock(&psvr->mutex);
    pthread_mutex_unlock(&psvr->lock);

#ifdef SU_DEBUG_LIST
    log_msg("serv %x free node " ColorRed "%p"ColorEnd" seq %d",
            psvr, packet, sendhdr.seq);
#endif

    free(packet);

    return n;   /* return size of received datagram */

error_ret:
    if (--psvr->ackwaitnum == 0) {
        su_serv_list_empty(psvr, &psvr->ackrecvls);
    }
    pthread_mutex_unlock(&psvr->mutex);
    pthread_mutex_unlock(&psvr->lock);
    return -1;
}
static void handle_su_peer_recv(fe_t *fe)
{
    int ret, port;
    char ipbuff[INET6_ADDRSTRLEN];
    SAUN saddr;
    socklen_t socklen;
    su_peer_t *psar = container_of(fe, su_peer_t, fe);
    struct iovec iovrecv[2] = {{0}};    /* assumed init to 0 */
    struct msghdr msgrecv = {0};        /* assumed init to 0 */
    frames_t *frame;

recvagain:
    socklen = psar->destlen;
    frame = calloc(1, sizeof(frames_t) + REALDATAMAX);
    if (frame == 0) {
        errno = ENOBUFS; // ENOMEM
        log_msg("peer %x ENOBUFS", psar);
        /* reject datagram */
        ret = recvfrom(fe->fd, rejectbuff, sizeof(rejectbuff), 0, (SA*)&saddr, &socklen);
        if (ret < 0 && errno == EAGAIN) {
            return;
        }
#ifdef SU_DEBUG_PEER_RECV
        switch (saddr.sfamily) {
            case PF_INET:
            case PF_INET6:
#ifdef SU_DEBUG_IP6FULL
                su_get_ip_port_f(&saddr, ipbuff, sizeof(ipbuff), &port);
#else
                su_get_ip_port(&saddr, ipbuff, sizeof(ipbuff), &port);
#endif
                break;
            default:
                log_msg("peer %x reject unknown protocol raw bytes %d", psar, ret);
                free(frame);
                goto recvagain;
        };
        ERR_RET("peer %x recv %s:%d bytes %d, but reject datas", psar, ipbuff, port, ret);
#endif
        return;
    }

    frame->srclen = psar->destlen;

    msgrecv.msg_name = &frame->srcaddr;
    msgrecv.msg_namelen = frame->srclen;
    msgrecv.msg_iov = iovrecv;
    msgrecv.msg_iovlen = 2;

    iovrecv[0].iov_base = &frame->recvhdr;
    iovrecv[0].iov_len = sizeof(suhdr_t);
    iovrecv[1].iov_base = frame->data;
    iovrecv[1].iov_len = REALDATAMAX;

    if ((ret = recvmsg(fe->fd, &msgrecv, 0)) < 0) {
        if (ret < 0 && errno == EAGAIN) {
            free(frame);
            return;
        }
        ERR_RET("recvmsg error");
    }

    switch (frame->srcaddr.sfamily) {
        case PF_INET:
        case PF_INET6:
#ifdef SU_DEBUG_IP6FULL
            su_get_ip_port_f(&frame->srcaddr, ipbuff, sizeof(ipbuff), &port);
#else
            su_get_ip_port(&frame->srcaddr, ipbuff, sizeof(ipbuff), &port);
#endif
            break;
        default:
            log_msg("peer %x reject unknown protocol type %d raw bytes %d",
                    psar, frame->srcaddr.sfamily, ret);
            free(frame);
            goto recvagain;
    };

#ifdef SU_DEBUG_PEER_RECV
    log_msg("peer %x recv %s:%d raw bytes %d", psar, ipbuff, port, ret);
#endif

    if (ret < sizeof(suhdr_t)) {
#ifdef SU_DEBUG_PEER_RECV
        errno = EBADMSG;
        err_ret("peer %x recv %s:%d raw bytes %d less than the protocol header %d",
                psar, ipbuff, port, ret, sizeof(suhdr_t));
#endif
        free(frame);
        goto recvagain;
    }

    suhdr_t *r = &frame->recvhdr;
    uint8_t act = r->act;
    uint8_t type = r->type;

    frame->len = ret - sizeof(suhdr_t);

    SAUN *psrc, *pdst;
    psrc = &frame->srcaddr;     // foreign host
    pdst = &psar->destaddr;     // localhost

#ifndef promiscuous_mode
    /* Filter: Check address and port
     * compare datagram source and peer destination */
    if ((pdst->sfamily == PF_INET6 && sockaddr_in6_cmp(&psrc->addr6, &pdst->addr6) != 0) ||
        (pdst->sfamily == PF_INET && sockaddr_in4_cmp(&psrc->addr4, &pdst->addr4) != 0)) {
#ifdef SU_DEBUG_PEER_RECV
        log_msg(ColorYel"peer %x reject act[0x%02x] from %s:%d datagram len %d"ColorEnd,
                psar, act, ipbuff, port, frame->len);
#endif
        free(frame);
        goto recvagain;
    }
#endif /* #ifndef promiscuous_mode */

    pthread_mutex_lock(&psar->lock);
    if (act == SU_SYN && frame->len > 0) {
        if (!psar->run) {
            log_msg("peer %x thread handle no run", psar);
            pthread_mutex_unlock(&psar->lock);
            free(frame);
            goto recvagain;
        }
#if defined SU_DEBUG_PEER_RECV || defined SU_DEBUG_LIST
        log_msg("peer %x append syn "ColorRed"%p"ColorEnd" seq %d datagram len %d",
                psar, frame, r->seq, frame->len);
#endif
        list_append(&psar->synrecvls, &frame->node);
        pthread_mutex_unlock(&psar->lock);
        pthread_cond_broadcast(&psar->syncond);
        goto recvagain;
    } else if (act == SU_ACK && type == SU_RELIABLE) {
#ifdef promiscuous_mode
        /* Filter: receive response from self request */
        if ((pdst->sfamily == PF_INET6 && sockaddr_in6_cmp(&psrc->addr6, &pdst->addr6) != 0) ||
            (pdst->sfamily == PF_INET && sockaddr_in4_cmp(&psrc->addr4, &pdst->addr4) != 0)) {
#ifdef SU_DEBUG_PEER_RECV
            log_msg(ColorYel "peer %x reject ack from %s:%d datagram len %d" ColorEnd,
                    psar, ipbuff, port, frame->len);
#endif
            pthread_mutex_unlock(&psar->lock);
            free(frame);
            goto recvagain;
        }
#endif /* #ifdef promiscuous_mode */
        if (psar->ackwaitnum <= 0) {
            pthread_mutex_unlock(&psar->lock);
            free(frame);
            goto recvagain;
        }
#if defined SU_DEBUG_PEER_RECV || defined SU_DEBUG_LIST
        log_msg("peer %x append ack "ColorRed"%p"ColorEnd" seq %d datagram len %d",
                psar, frame, r->seq, frame->len);
#endif
        list_append(&psar->ackrecvls, &frame->node);
        pthread_mutex_unlock(&psar->lock);
        pthread_cond_broadcast(&psar->ackcond);
        goto recvagain;
    } else {
        pthread_mutex_unlock(&psar->lock);
#ifdef SU_DEBUG_PEER_RECV
        errno = EPROTO;
        err_ret("peer %x recv %s:%d raw bytes %d", psar, ipbuff, port, ret);
#endif
        free(frame);
        return;
    }

    pthread_mutex_unlock(&psar->lock);
    goto recvagain;
}