/*
 * writebuf_tx
 *
 * Try to send the iovec at the head of the writebuf's queue out fd.
 * On a complete send the iovec is popped and freed; on a partial send
 * the unsent tail is shifted to the front of the buffer so a later
 * call resumes where this one stopped.
 *
 * Returns 0 on success (including "nothing queued" and transient
 * would-block conditions), -1 on a hard send error.
 */
static int writebuf_tx(scamper_writebuf_t *wb, int fd)
{
  struct iovec *iov;
  ssize_t size;

  /* nothing queued: nothing to do */
  if(slist_count(wb->iovs) == 0)
    return 0;

  iov = slist_head_get(wb->iovs);
  if((size = send(fd, iov->iov_base, iov->iov_len, 0)) == -1)
    {
      /*
       * a transient condition is not an error; leave the iovec queued
       * and try again later (consistent with the sendmsg-based
       * writebuf_tx elsewhere in this codebase)
       */
      if(errno == EAGAIN || errno == EINTR)
	return 0;
      return -1;
    }

  if((size_t)size == iov->iov_len)
    {
      /* the whole iovec was sent, so release it */
      slist_head_pop(wb->iovs);
      free(iov->iov_base);
      free(iov);
    }
  else
    {
      /* partial send: shift the unsent bytes to the front */
      iov->iov_len -= size;
      memmove(iov->iov_base, (uint8_t *)iov->iov_base + size, iov->iov_len);
    }

  return 0;
}
/*
 * Description: Removes the element from a slist at the given position.
 * If the position is negative or out of range (including an empty
 * list), then the slist is unchanged.  Only the node is freed; the
 * caller owns the node's data.  Returns the (possibly new) list head.
 *
 * Fixes over the previous version:
 *  - `ptr--` performed pointer arithmetic on a list node to reach the
 *    predecessor, which is undefined behavior unless nodes happen to be
 *    contiguous in memory; we now track the predecessor while walking.
 *  - the node being removed is saved before relinking, so the correct
 *    node is freed (the old code freed the successor it had just
 *    linked in).
 *  - `position == slist_count(node)` and removal from an empty list no
 *    longer dereference NULL.
 */
SList slist_remove(SList node, int position)
{
	SList prev = NULL;
	SList victim = node;
	int idx = 0;

	/* valid indices are 0 .. count-1; anything else is a no-op */
	if (position < 0 || slist_count(node) <= position) {
		return node;
	}

	/* walk to the victim, remembering its predecessor */
	while (idx != position) {
		prev = victim;
		victim = victim->next;
		idx++;
	}

	if (prev == NULL) {
		/* removing the head: the second node becomes the head */
		node = victim->next;
	} else {
		prev->next = victim->next;
	}
	free(victim);
	return node;
}
/*
 * writebuf_callback
 *
 * this function is called by the scamper_fd code whenever the fd is ready to
 * write to.
 */
static void writebuf_callback(int fd, void *param)
{
  scamper_writebuf_t *wb = (scamper_writebuf_t *)param;

  /* the writebuf must be attached to the fd that became writable */
  assert(wb->fdn != NULL);
  assert(scamper_fd_fd_get(wb->fdn) == fd);

  /*
   * if this callback was called, but there is no outstanding data to
   * send, see if there is a consume function with data available.  if
   * there is not then withdraw the entry from the fd monitoring module
   */
  if(slist_count(wb->iovs) == 0 && wb->cfunc != NULL)
    {
      /* ask the consumer to queue more data; it may append to wb->iovs */
      wb->cfunc(wb->cparam);
    }

  if(slist_count(wb->iovs) > 0)
    {
      /* there is data queued: try to push it out the fd */
      if(writebuf_tx(wb, fd) != 0)
	{
	  /* record the errno and notify the error callback, if any */
	  wb->error = errno;
	  if(wb->efunc != NULL)
	    wb->efunc(wb->param, errno, wb);
	  return;
	}
    }
  else
    {
      /* the consumer produced nothing, so drop it */
      if(wb->cfunc != NULL)
	{
	  wb->cfunc = NULL;
	  wb->cparam = NULL;
	}
    }

  /* if all the iovecs are sent, withdraw the fd monitor */
  if(slist_count(wb->iovs) == 0 && wb->cfunc == NULL)
    {
      scamper_fd_write_pause(wb->fdn);
      /* tell the drained callback, if any, that the queue is empty */
      if(wb->dfunc != NULL)
	wb->dfunc(wb->param, wb);
      return;
    }

  return;
}
/*
 * scamper_writebuf_consume
 *
 * register a consume callback with the writebuf.  if nothing is
 * currently queued for sending, the callback is invoked immediately to
 * pull data in; should that produce nothing, the callback registration
 * is dropped again.  always returns zero.
 */
int scamper_writebuf_consume(scamper_writebuf_t *wb, void *cparam, int cfunc(void *param))
{
  wb->cparam = cparam;
  wb->cfunc = cfunc;

  /* don't need to consume if there is already stuff queued to send */
  if(slist_count(wb->iovs) == 0)
    {
      /* pull from the consumer now */
      cfunc(cparam);

      /* no effect? then forget the consume pointer */
      if(slist_count(wb->iovs) == 0)
	{
	  wb->cfunc = NULL;
	  wb->cparam = NULL;
	}
    }

  return 0;
}
/*
 * _curl_pool_client_alloc
 *
 * Hand out a curl client from the pool's free list, or create a new
 * one when the free list is empty.  Newly created clients are recorded
 * in cp->clients and counted in cp->size.  Returns NULL if cp is
 * invalid or client creation fails.
 */
struct curl_client_t* _curl_pool_client_alloc(curl_pool_t* cp)
{
    struct curl_client_t* cc = NULL;

    if (!cp || !cp->free_list) {
        return NULL;
    }

    /* reuse a pooled client when one is available */
    if (slist_count(cp->free_list) > 0) {
        cc = slist_pop_front(cp->free_list);
        return cc;
    }

    /* pool exhausted: create a fresh client */
    cc = curl_client_init();
    if (cc) {
        /*
         * only grow the size counter on success; the previous version
         * bumped cp->size before checking curl_client_init(), so the
         * counter drifted upward on every failed allocation
         */
        cp->size++;
        hash_insert(cp->clients, cc);
    }
    return cc;
}
/*
 * scamper_task_sig_install
 *
 * install all of a task's signatures into the global lookup structures
 * (tx_ip / tx_nd splay trees, sniff dlist) so the task can be found by
 * scamper_task_find.  returns 0 on success; on any failure every
 * signature installed so far is removed again and -1 is returned.
 */
int scamper_task_sig_install(scamper_task_t *task)
{
  scamper_task_sig_t *sig;
  scamper_task_t *tf;
  s2t_t *s2t;
  slist_node_t *n;

  /* a task with no signatures cannot be installed */
  if(slist_count(task->siglist) < 1)
    return -1;

  for(n=slist_head_node(task->siglist); n != NULL; n = slist_node_next(n))
    {
      s2t = slist_node_item(n);
      sig = s2t->sig;

      /* check if another task has this signature already */
      if((tf = scamper_task_find(sig)) != NULL)
	{
	  /* a different task owning the sig is a conflict; our own is fine */
	  if(tf != task)
	    goto err;
	  continue;
	}

      if(sig->sig_type == SCAMPER_TASK_SIG_TYPE_TX_IP)
	s2t->node = splaytree_insert(tx_ip, s2t);
      else if(sig->sig_type == SCAMPER_TASK_SIG_TYPE_TX_ND)
	s2t->node = splaytree_insert(tx_nd, s2t);
      else if(sig->sig_type == SCAMPER_TASK_SIG_TYPE_SNIFF)
	s2t->node = dlist_tail_push(sniff, s2t);

      /*
       * NOTE(review): if sig_type matches none of the three cases above,
       * this check relies on s2t->node already being NULL -- presumably
       * guaranteed by how s2t_t is allocated; verify at the alloc site.
       */
      if(s2t->node == NULL)
	{
	  scamper_debug(__func__, "could not install sig");
	  goto err;
	}
    }

  return 0;

 err:
  /* roll back any signatures installed before the failure */
  scamper_task_sig_deinstall(task);
  return -1;
}
void add_individual(struct rcps_individual *ind, struct rcps_population *pop) { struct slist_node *n; struct rcps_individual *i; n = slist_node_new(ind); slist_add(pop->individuals, n); ++pop->size; // what is this? (da) while (slist_count(pop->individuals) > pop->size) { n = slist_last(pop->individuals); if (!n) { // XXX what the f**k is that? fprintf(stderr, "uhu, no one there?\n"); } slist_unlink(pop->individuals, n); i = (struct rcps_individual*)slist_node_getdata(n); slist_node_free(n, NULL); free(i->genome.schedule); free(i->genome.modes); free(i->genome.alternatives); free(i); } }
/*
 * Description: Inserts a new element into the slist at the given
 * position.  An out-of-range position (negative, or past the end)
 * degrades to an append.  Returns the (possibly new) head of the list,
 * or NULL if the node allocation fails.
 */
SList slist_insert(SList node, void *data, int position)
{
	SList fresh = NULL, walk = NULL;
	int hops;

	/* invalid positions fall back to appending at the tail */
	if (position < 0 || slist_count(node) < position) {
		return slist_append(node, data);
	}

	fresh = (SList)xmalloc(sizeof(struct SList_st), 1);
	if (fresh == NULL) {
		return NULL;
	}
	fresh->data = data;

	if (position == 0) {
		/* new head: old list hangs off the fresh node */
		fresh->next = node;
		return fresh;
	}

	/* walk to the node just before the insertion point */
	walk = node;
	for (hops = 1; hops != position; hops++) {
		walk = walk->next;
	}
	fresh->next = walk->next;
	walk->next = fresh;
	return node;
}
/*
 * dns_query_cb
 *
 * evdns_getaddrinfo completion callback: for the first IPv4 result that
 * we can connect to, wire up the client-side and remote-side
 * bufferevents for this trans, then notify the daemon side with an
 * HD_CMD_SS5_ACT control message.  Frees the addrinfo list, the C_ITEM,
 * and the DNS_STRUCT before returning, on every path.
 */
void dns_query_cb(int errcode, struct evutil_addrinfo *addr, void *ptr)
{
    P_DNS_STRUCT p_dns = (P_DNS_STRUCT)ptr;

    if (errcode) {
        printf("Query error for: %s -> %s\n", p_dns->hostname, evutil_gai_strerror(errcode));
    }
    else {
        struct evutil_addrinfo *ai;
        struct sockaddr_in sin;
        for (ai = addr; ai; ai = ai->ai_next) {
            if (ai->ai_family == AF_INET) {
                /* build the destination sockaddr from the DNS answer and
                 * the port carried in the DNS_STRUCT */
                memset(&sin, 0, sizeof(sin));
                sin.sin_family = AF_INET;
                sin.sin_addr = ((struct sockaddr_in *)ai->ai_addr)->sin_addr;
                sin.sin_port = p_dns->port;
                st_d_print("REQUEST: %s:%d", inet_ntoa(sin.sin_addr), ntohs(sin.sin_port));
                int remote_socket = ss_connect_srv(&sin);
                if (remote_socket == -1) {
                    /* try the next resolved address */
                    st_d_error("REQUEST: %s:%d FAILED!", inet_ntoa(sin.sin_addr), ntohs(sin.sin_port));
                    continue;
                }
                /* client-facing bufferevent on the queued socket */
                evutil_make_socket_nonblocking(p_dns->p_c_item->socket);
                struct bufferevent *new_bev = bufferevent_socket_new(p_dns->p_threadobj->base, p_dns->p_c_item->socket, BEV_OPT_CLOSE_ON_FREE);
                assert(new_bev);
                bufferevent_setcb(new_bev, thread_bufferread_cb_enc, NULL, thread_bufferevent_cb, p_dns->p_trans);
                bufferevent_enable(new_bev, EV_READ|EV_WRITE);
                /* remote-facing bufferevent on the freshly connected socket */
                evutil_make_socket_nonblocking(remote_socket);
                struct bufferevent *new_ext_bev = bufferevent_socket_new(p_dns->p_threadobj->base, remote_socket , BEV_OPT_CLOSE_ON_FREE);
                assert(new_ext_bev);
                bufferevent_setcb(new_ext_bev, thread_bufferread_cb_enc, NULL, thread_bufferevent_cb, p_dns->p_trans);
                bufferevent_enable(new_ext_bev, EV_READ|EV_WRITE);
                p_dns->p_trans->bev_d = new_bev;
                p_dns->p_trans->bev_u = new_ext_bev;
                st_d_print("DDDDD: 当前活动连接数:[[[ %d ]]], 任务队列:[[ %d ]]", slist_count(&p_dns->p_trans->p_activ_item->trans), slist_count(&p_dns->p_threadobj->conn_queue));
                st_d_print("SS5激活客户端Bufferevent使能!");
                /* tell the daemon the SS5 channel is active */
                CTL_HEAD head;
                memset(&head, 0, CTL_HEAD_LEN);
                head.direct = USR_DAEMON;
                head.cmd = HD_CMD_SS5_ACT;
                head.extra_param = p_dns->p_trans->usr_lport;
                head.mach_uuid = p_dns->p_trans->p_activ_item->mach_uuid;
                bufferevent_write(p_dns->p_trans->p_activ_item->bev_daemon, &head, CTL_HEAD_LEN);
                break;
            }
            /* NOTE(review): this print executes for every non-IPv4 entry in
             * the list, not once after all attempts fail -- it probably
             * belongs after the loop; also the literal is split across a raw
             * newline in the original source, preserved here unchanged */
            st_d_print("ALL 
REQUEST FOR %s FAILED!", p_dns->hostname);
        }
        evutil_freeaddrinfo(addr);
    }
    /* ownership of the C_ITEM and DNS_STRUCT ends here on all paths */
    free(p_dns->p_c_item);
    free(p_dns);
    return;
}
/**
 * There is a race condition here: if the libevent connection cannot be
 * established, or an EOF event arrives before HD_CMD_SS5_ACT has been
 * sent, the client ends up with a zombie trans connection.  The client
 * is currently single-threaded, so this waste must be eliminated.
 *
 * The current plan: when tearing down a trans, additionally send a
 * command over the client's main channel.
 */
static void thread_process(int fd, short which, void *arg)
{
    P_THREAD_OBJ p_threadobj = (P_THREAD_OBJ)arg;
    P_TRANS_ITEM p_trans = NULL;
    P_SLIST_HEAD p_list = NULL;
    P_C_ITEM p_c_item = NULL;
    struct bufferevent *new_bev = NULL;
    char buf[1];
    CTL_HEAD head;

    /* one command byte per wakeup arrives over the notification pipe */
    if (read(fd, buf, 1) != 1)
    {
        st_d_error("Can't read from libevent pipe\n");
        return;
    }

    switch (buf[0])
    {
        case 'D': // DAEMON->USR
            /* take the next queued connection item for this worker */
            p_list = slist_fetch(&p_threadobj->conn_queue);
            if (!p_list)
            {
                st_d_error("无法从任务队列中获取任务!");
                return;
            }
            p_c_item = list_entry(p_list, C_ITEM, list);
            p_trans = (P_TRANS_ITEM)p_c_item->arg.ptr;
            /* daemon-side bufferevent for this trans */
            new_bev = bufferevent_socket_new(p_threadobj->base, p_c_item->socket, BEV_OPT_CLOSE_ON_FREE);
            bufferevent_setcb(new_bev, thread_bufferread_cb, NULL, thread_bufferevent_cb, p_trans);
            bufferevent_enable(new_bev, EV_READ|EV_WRITE);
            p_trans->bev_d = new_bev;
            free(p_c_item);
            /* the USR side must already have been set up by case 'U' */
            if (p_trans->bev_u == NULL || p_trans->usr_lport == 0 || p_trans->p_activ_item == NULL)
            {
                SYS_ABORT("USR SIDE SHOULD BE OK ALREAY!!!");
            }
            st_d_print("WORKTHREAD-> DAEMON_USR(%d) OK!", p_trans->usr_lport);
            st_d_print("DDDDD: 当前活动连接数:[[[ %d ]]],任务队列:[[ %d ]]", slist_count(&p_trans->p_activ_item->trans), slist_count(&p_threadobj->conn_queue));
            st_d_print("激活客户端Bufferevent使能!");
            /* notify both endpoints that the connection is active */
            memset(&head, 0, CTL_HEAD_LEN);
            head.direct = USR_DAEMON;
            head.cmd = HD_CMD_CONN_ACT;
            head.extra_param = p_trans->usr_lport;
            head.mach_uuid = p_trans->p_activ_item->mach_uuid;
            bufferevent_write(p_trans->p_activ_item->bev_daemon, &head, CTL_HEAD_LEN);
            head.direct = DAEMON_USR;
            bufferevent_write(p_trans->p_activ_item->bev_usr, &head, CTL_HEAD_LEN);
            break;

        case 'U': //USR->DAEMON
            p_list = slist_fetch(&p_threadobj->conn_queue);
            if (!p_list)
            {
                st_d_error("无法从任务队列中获取任务!");
                return;
            }
            p_c_item = list_entry(p_list, C_ITEM, list);
            p_trans = (P_TRANS_ITEM)p_c_item->arg.ptr;
            /* user-side bufferevent only; daemon side follows via 'D' */
            new_bev = bufferevent_socket_new(p_threadobj->base, p_c_item->socket, BEV_OPT_CLOSE_ON_FREE);
            bufferevent_setcb(new_bev, thread_bufferread_cb, NULL, thread_bufferevent_cb, p_trans);
            bufferevent_enable(new_bev, EV_READ|EV_WRITE);
            p_trans->bev_u = new_bev;
            free(p_c_item);
            st_d_print("WORKTHREAD-> USR_DAEMON(%d) OK!", p_trans->usr_lport);
            break;

        case 'S': // DAEMON->USR
            p_list = slist_fetch(&p_threadobj->conn_queue);
            if (!p_list)
            {
                st_d_error("无法从任务队列中获取任务!");
                return;
            }
            p_c_item = list_entry(p_list, C_ITEM, list);
            p_trans = (P_TRANS_ITEM)p_c_item->arg.ptr;
            /* SS5 path: the trans must be encrypted and carry the parsed
             * SOCKS5 request in p_trans->dat */
            assert(p_trans->is_enc);
            assert(p_trans->dat);
            encrypt_ctx_init(&p_trans->ctx_enc, p_trans->usr_lport, p_trans->p_activ_item->enc_key, 1);
            encrypt_ctx_init(&p_trans->ctx_dec, p_trans->usr_lport, p_trans->p_activ_item->enc_key, 0);
            int remote_socket = 0;
            /* NOTE(review): this `buf` shadows the outer `char buf[1]`
             * command byte; it points at the SOCKS5 request bytes */
            char* buf = (char *)p_trans->dat;
            if (buf[3] == 0x01)
            {
                /* ATYP 0x01: literal IPv4 address + port in the request */
                struct sockaddr_in sin;
                memset(&sin, 0, sizeof(sin));
                sin.sin_family = AF_INET;
                memcpy(&sin.sin_addr.s_addr, &buf[4], 4);
                memcpy(&sin.sin_port, &buf[4+4], 2);
                free(p_trans->dat);
                st_d_print("REQUEST: %s:%d", inet_ntoa(sin.sin_addr), ntohs(sin.sin_port));
                remote_socket = ss_connect_srv(&sin);
                if (remote_socket == -1)
                {
                    free(p_c_item);
                    st_d_error("CONNECT ERROR!");
                    return;
                }
            }
            else
            {
                /* ATYP domain name: length-prefixed host + port; resolve
                 * asynchronously, dns_query_cb finishes the setup */
                char remote_addr[128];
                unsigned short remote_port = 0;
                memset(remote_addr, 0, sizeof(remote_addr));
                strncpy(remote_addr, &buf[4+1], buf[4]);
                memcpy(&remote_port, &buf[4+1+buf[4]], 2);
                free(p_trans->dat);
                P_DNS_STRUCT p_dns = (P_DNS_STRUCT)calloc(sizeof(DNS_STRUCT), 1);
                if (!p_dns)
                {
                    st_d_error("申请内存失败:%d", sizeof(DNS_STRUCT));
                    free(p_c_item);
                    return;
                }
                st_d_print("REQUEST: %s:%d", remote_addr, ntohs(remote_port));
                strncpy(p_dns->hostname, remote_addr, sizeof(p_dns->hostname));
                p_dns->port = remote_port;
                /* ownership of p_c_item transfers to the DNS callback */
                p_dns->p_c_item = p_c_item;
                p_dns->p_threadobj = p_threadobj;
                p_dns->p_trans = p_trans;
                struct evutil_addrinfo hints;
                struct evdns_getaddrinfo_request *req;
                memset(&hints, 0, sizeof(hints));
                hints.ai_family = AF_INET;
                hints.ai_flags = EVUTIL_AI_CANONNAME;
                /* Unless we specify a socktype, we'll get at least two entries for
                 * each address: one for TCP and one for UDP. That's not what we
                 * want. */
                hints.ai_socktype = SOCK_STREAM;
                hints.ai_protocol = IPPROTO_TCP;
                req = evdns_getaddrinfo(
                    srvopt.evdns_base, remote_addr, NULL /* no service name given */,
                    &hints, dns_query_cb, p_dns);
                if (req == NULL)
                {
                    printf(" [request for %s returned immediately]\n", remote_addr);
                    /* No need to free user_data or decrement n_pending_requests; that
                     * happened in the callback. */
                }
                return;
            }
            /* literal-IPv4 path continues here: wire up both sides */
            evutil_make_socket_nonblocking(p_c_item->socket);
            struct bufferevent *new_bev = bufferevent_socket_new(p_threadobj->base, p_c_item->socket, BEV_OPT_CLOSE_ON_FREE);
            assert(new_bev);
            bufferevent_setcb(new_bev, thread_bufferread_cb_enc, NULL, thread_bufferevent_cb, p_trans);
            bufferevent_enable(new_bev, EV_READ|EV_WRITE);
            evutil_make_socket_nonblocking(remote_socket);
            struct bufferevent *new_ext_bev = bufferevent_socket_new(p_threadobj->base, remote_socket , BEV_OPT_CLOSE_ON_FREE);
            assert(new_ext_bev);
            bufferevent_setcb(new_ext_bev, thread_bufferread_cb_enc, NULL, thread_bufferevent_cb, p_trans);
            bufferevent_enable(new_ext_bev, EV_READ|EV_WRITE);
            p_trans->bev_d = new_bev;
            p_trans->bev_u = new_ext_bev;
            free(p_c_item);
            st_d_print("DDDDD: 当前活动连接数:[[[ %d ]]], 任务队列:[[ %d ]]", slist_count(&p_trans->p_activ_item->trans), slist_count(&p_threadobj->conn_queue));
            st_d_print("SS5激活客户端Bufferevent使能!");
            /* tell the daemon the SS5 channel is active */
            memset(&head, 0, CTL_HEAD_LEN);
            head.direct = USR_DAEMON;
            head.cmd = HD_CMD_SS5_ACT;
            head.extra_param = p_trans->usr_lport;
            head.mach_uuid = p_trans->p_activ_item->mach_uuid;
            bufferevent_write(p_trans->p_activ_item->bev_daemon, &head, CTL_HEAD_LEN);
            break;

        default:
            SYS_ABORT("WHAT DO I GET: %c", buf[0]);
            break;
    }

    return;
}
/*
 * writebuf_tx
 *
 * gather the queued iovecs into a single sendmsg() call and release
 * whatever was fully transmitted; a partially-sent head iovec has its
 * unsent tail shifted to the front so the next call resumes there.
 * returns 0 on success or a transient error (EAGAIN/EINTR), -1 on a
 * hard sendmsg error.
 */
static int writebuf_tx(scamper_writebuf_t *wb, int fd)
{
  struct msghdr msg;
  struct iovec *iov;
  uint8_t *bytes;
  ssize_t size;
  slist_node_t *node;
  int i, iovs;

  /* nothing queued: nothing to do */
  if((iovs = slist_count(wb->iovs)) <= 0)
    {
      return 0;
    }

  /*
   * if there is only one iovec, or we can't allocate an array large enough
   * for the backlog, then just send the first without allocating the
   * array.  otherwise, fill the array with the iovecs to send.
   */
  if(iovs == 1 || (iov = malloc(iovs * sizeof(struct iovec))) == NULL)
    {
      iov = slist_head_get(wb->iovs);
      iovs = 1;
    }
  else
    {
      /* copy each queued iovec into the scratch array, in order */
      node = slist_head_node(wb->iovs);
      for(i=0; i<iovs; i++)
	{
	  assert(node != NULL);
	  memcpy(&iov[i], slist_node_item(node), sizeof(struct iovec));
	  node = slist_node_next(node);
	}
    }

  /* fill out the msghdr and set the send buf to be the iovecs */
  memset(&msg, 0, sizeof(msg));
  msg.msg_iov = iov;
  msg.msg_iovlen = iovs;
  size = sendmsg(fd, &msg, 0);

  /* if we allocated an array of iovecs, then free it now */
  if(iovs > 1)
    {
      free(iov);
    }

  if(size == -1)
    {
      /* transient conditions are not errors; retry on a later call */
      if(errno == EAGAIN || errno == EINTR)
	return 0;
      return -1;
    }

  /* free up the iovecs that have been sent */
  while(size > 0)
    {
      node = slist_head_node(wb->iovs);
      iov = slist_node_item(node);

      /* if the whole iovec was used then it can be free'd */
      if(iov->iov_len <= (size_t)size)
	{
	  size -= iov->iov_len;
	  free(iov->iov_base);
	  free(iov);
	  slist_head_pop(wb->iovs);
	  continue;
	}

      /* if this iovec was only partially sent, then shift the vec */
      bytes = (uint8_t *)iov->iov_base;
      memmove(iov->iov_base, bytes + size, iov->iov_len - size);
      iov->iov_len -= size;
      break;
    }

  return 0;
}
/**
 * Invoked only on the USR side.
 *
 * Listener callback for a new local connection: connect a fresh socket
 * to the server, allocate a PORTTRANS for this local port, attach a
 * bufferevent to each side, and announce the connection request
 * (HD_CMD_CONN) to the server over the server-side bufferevent.
 */
void accept_conn_cb(struct evconnlistener *listener,
    evutil_socket_t fd, struct sockaddr *address, int socklen, void *ctx)
{
    P_PORTMAP p_map = (P_PORTMAP)ctx;
    char hbuf[NI_MAXHOST], sbuf[NI_MAXSERV];

    getnameinfo (address, socklen, hbuf, sizeof(hbuf),sbuf, sizeof(sbuf),
                 NI_NUMERICHOST | NI_NUMERICSERV);

    st_print("WELCOME NEW CONNECT (HOST=%s, PORT=%s)\n", hbuf, sbuf);

    /* We got a new connection! Set up a bufferevent for it. */
    struct event_base *base = evconnlistener_get_base(listener);

    int srv_fd = socket(AF_INET, SOCK_STREAM, 0);
    if (srv_fd == -1)
    {
        /* fix: the previous version never checked socket() and would
         * have passed -1 to sc_connect_srv */
        st_d_error("连接服务器失败!");
        evutil_closesocket(fd);        /* fix: don't leak the accepted fd */
        return;
    }
    if(sc_connect_srv(srv_fd) != RET_YES)
    {
        st_d_error("连接服务器失败!");
        evutil_closesocket(srv_fd);    /* fix: don't leak the server fd */
        evutil_closesocket(fd);        /* fix: don't leak the accepted fd */
        return;
    }

    P_PORTTRANS p_trans = sc_create_trans(atoi(sbuf));
    if (!p_trans)
    {
        st_d_error("本地无空闲TRANS!");
        evutil_closesocket(srv_fd);    /* fix: don't leak the server fd */
        evutil_closesocket(fd);        /* fix: don't leak the accepted fd */
        return;
    }

    /* both bufferevents own their sockets (BEV_OPT_CLOSE_ON_FREE) */
    struct bufferevent *local_bev =
        bufferevent_socket_new(base, fd, BEV_OPT_CLOSE_ON_FREE);
    assert(local_bev);
    bufferevent_setcb(local_bev, bufferread_cb, NULL, bufferevent_cb, p_trans);
    //bufferevent_enable(local_bev, EV_READ|EV_WRITE);

    struct bufferevent *srv_bev =
        bufferevent_socket_new(base, srv_fd, BEV_OPT_CLOSE_ON_FREE);
    assert(srv_bev);
    bufferevent_setcb(srv_bev, bufferread_cb, NULL, bufferevent_cb, p_trans);
    //bufferevent_enable(srv_bev, EV_READ|EV_WRITE);

    p_trans->is_enc = 0;
    p_trans->l_port = atoi(sbuf);
    p_trans->local_bev = local_bev;
    p_trans->srv_bev = srv_bev;

    st_d_print("DDDDD: 当前活动连接数:[[[ %d ]]]",
               slist_count(&cltopt.trans));

    /* report the connection request to the server */
    CTL_HEAD ret_head;
    memset(&ret_head, 0, CTL_HEAD_LEN);
    ret_head.cmd = HD_CMD_CONN;
    ret_head.daemonport = p_map->daemonport;
    ret_head.usrport = p_map->usrport;
    ret_head.extra_param = atoi(sbuf);
    ret_head.mach_uuid = cltopt.session_uuid;
    ret_head.direct = USR_DAEMON;

    bufferevent_write(srv_bev, &ret_head, CTL_HEAD_LEN);

    st_d_print("客户端创建BEV OK!");

    /**
     * Some services have the server speak first after the connection is
     * established, with the client replying afterwards.  To cope with
     * that, on receiving the conn message the client first sends a
     * control message to the DAEMON side to open its data channel.
     */
    return;
}