void server_test_bigStream(void)
{
    struct sockaddr_tipc server_addr; /* address for socket */
    int listener_sd;                  /* socket to listen on */
    int peer_sd;                      /* socket to receive on */
    int numRec;                       /* number of messages received */
    char failStr[50];                 /* failure string */

    info("****** TIPC big stream test server started ******\n\n");

    listener_sd = createSocketTIPC(SOCK_STREAM);
    setServerAddrTo(&server_addr, TIPC_ADDR_NAMESEQ, TS_TEST_TYPE,
                    TS_TEST_INST, TS_TEST_INST);
    server_addr.scope = TS_SCOPE;
    bindSocketTIPC(listener_sd, &server_addr);
    listenSocketTIPC(listener_sd);

    sendSyncTIPC(TS_SYNC_ID_1); /* tell client to start test */
    peer_sd = acceptSocketTIPC(listener_sd);

    numRec = do_receive(peer_sd, 0);
    info("Subtest 1 with the MSG_WAITALL flag not set, number received = %d\n", numRec);
    if (numRec <= 0) {
        sprintf(failStr, "SubTest 1 returned %d", numRec);
        failTest(failStr);
    }

    sendSyncTIPC(TS_SYNC_ID_2); /* tell client to send 2nd stream */
    numRec = do_receive(peer_sd, MSG_WAITALL);
    info("Subtest 2 with the MSG_WAITALL flag set, number received = %d\n", numRec);
    if (numRec != 1) {
        sprintf(failStr, "SubTest 2 returned %d", numRec);
        failTest(failStr);
    }

    recvSyncTIPC(TS_SYNC_ID_3); /* ensure client has closed connection */
    numRec = do_receive(peer_sd, MSG_WAITALL);
    info("Subtest 3 with the MSG_WAITALL flag set, number received = %d\n", numRec);
    if (numRec != 0) {
        sprintf(failStr, "SubTest 3 returned %d", numRec);
        failTest(failStr);
    }

    closeSocketTIPC(peer_sd);
    closeSocketTIPC(listener_sd);
    info("****** TIPC big stream test server finished ******\n");
}
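/*
 * The three subtests above hinge on how recv() behaves with and without
 * MSG_WAITALL.  The function below is only an illustrative sketch of that
 * distinction on a generic connected SOCK_STREAM socket; sock_fd and the
 * buffer size are assumptions, not part of the TIPC test harness.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>

void recv_waitall_demo(int sock_fd)
{
    char buf[4096];
    ssize_t n;

    /* Without MSG_WAITALL, recv() may return as soon as any data is
     * available, so n can be anywhere from 1 up to sizeof(buf). */
    n = recv(sock_fd, buf, sizeof(buf), 0);
    printf("plain recv returned %zd\n", n);

    /* With MSG_WAITALL, recv() keeps blocking until the full sizeof(buf)
     * bytes have arrived, the peer closes the connection (return 0),
     * or an error occurs (return -1). */
    n = recv(sock_fd, buf, sizeof(buf), MSG_WAITALL);
    printf("MSG_WAITALL recv returned %zd\n", n);
}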
std::size_t receive(base_implementation_type& impl,
    const MutableBufferSequence& buffers,
    socket_base::message_flags flags, std::error_code& ec)
{
    return do_receive(impl,
        buffer_sequence_adapter<std::experimental::net::mutable_buffer,
            MutableBufferSequence>::first(buffers),
        flags, ec);
}
/**
 * Receive data: dump messages received from the server to standard output.
 * Reading stops at a line containing only ".".
 *
 * @param sd [in] socket to receive the messages from
 */
void do_output(int sd)
{
    while (do_receive(sd, rbuf, MAXLINELEN) != NULL) {
        if (rbuf[0] == '.' && rbuf[1] == '\0')
            break;
        printf("> %s\n", rbuf);
    }
    fflush(stdout);
}
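/*
 * do_output() above relies on a line-oriented do_receive() whose definition
 * is not shown here.  The sketch below is only one plausible shape for it
 * (read bytes until a newline, NUL-terminate, return the buffer or NULL on
 * EOF/error); the original project's implementation may well differ.
 */
#include <unistd.h>

static char *do_receive_sketch(int sd, char *buf, int maxlen)
{
    int len = 0;
    char c;

    while (len < maxlen - 1) {
        ssize_t n = read(sd, &c, 1);
        if (n <= 0)
            return NULL;        /* connection closed or read error */
        if (c == '\n')
            break;              /* end of one message line */
        buf[len++] = c;
    }
    buf[len] = '\0';
    return buf;
}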
void start_receive(BSocksClient *o, uint8_t *dest, int total)
{
    ASSERT(total > 0)

    o->control.recv_dest = dest;
    o->control.recv_len = 0;
    o->control.recv_total = total;

    do_receive(o);
}
int cmd_receive(int argc, char **argv)
{
    int c;
    char *tomnt = NULL;
    char *fromfile = NULL;
    struct btrfs_receive r;
    int receive_fd = fileno(stdin);
    u64 max_errors = 1;
    int ret;

    memset(&r, 0, sizeof(r));
    r.mnt_fd = -1;
    r.write_fd = -1;
    r.dest_dir_fd = -1;

    while ((c = getopt_long(argc, argv, "evf:", long_opts, NULL)) != -1) {
        switch (c) {
        case 'v':
            g_verbose++;
            break;
        case 'f':
            fromfile = optarg;
            break;
        case 'e':
            r.honor_end_cmd = 1;
            break;
        case 'E':
            max_errors = arg_strtou64(optarg);
            break;
        case '?':
        default:
            fprintf(stderr, "ERROR: receive args invalid.\n");
            return 1;
        }
    }

    if (check_argc_exact(argc - optind, 1))
        usage(cmd_receive_usage);

    tomnt = argv[optind];

    if (fromfile) {
        receive_fd = open(fromfile, O_RDONLY | O_NOATIME);
        if (receive_fd < 0) {
            fprintf(stderr, "ERROR: failed to open %s\n", fromfile);
            return 1;
        }
    }

    ret = do_receive(&r, tomnt, receive_fd, max_errors);

    return !!ret;
}
/**
 * @brief Receive operation
 *
 * This function receives data from the socket, taking care of synchronization
 * with any other asynchronous operations.
 * Note: it can block the caller, because it calls do_receive(), which
 * continues receiving until the given number of bytes have been received.
 * @param buf Buffer where received data must be put
 * @param size Size of data to be received
 * @return Number of bytes actually received
 * @exception runtime_error in case of too small buffer
 *
 * Example of usage:
 * <code>
 *     std::array<char, 5> buf;
 *     socket.receive(net::buffer(buf), 3);
 * </code>
 */
int AbstractSocket::receive(struct buff buf, std::size_t size)
{
    int ret = 0;

    if (buf.size_ == 0 || size > buf.size_) {
        ERROR("Wrong buffer size!");
        throw std::runtime_error("Wrong buffer size");
    }

    lock_.lock();
    try {
        ret = do_receive(buf.ptr_, size);
    } catch (...) {
        ERROR("Receive error!");
    }
    lock_.unlock();

    return ret;
}
static int do_cmd_receive(int argc, char **argv)
{
    int c;
    char *tomnt = NULL;
    char *fromfile = NULL;
    struct btrfs_receive r;
    int receive_fd = fileno(stdin);
    int ret;

    memset(&r, 0, sizeof(r));

    while ((c = getopt(argc, argv, "vf:")) != -1) {
        switch (c) {
        case 'v':
            g_verbose++;
            break;
        case 'f':
            fromfile = optarg;
            break;
        case '?':
        default:
            fprintf(stderr, "ERROR: receive args invalid.\n");
            return 1;
        }
    }

    if (optind + 1 != argc) {
        fprintf(stderr, "ERROR: receive needs path to subvolume\n");
        return 1;
    }

    tomnt = argv[optind];

    if (fromfile) {
        receive_fd = open(fromfile, O_RDONLY | O_NOATIME);
        if (receive_fd < 0) {
            fprintf(stderr, "ERROR: failed to open %s\n", fromfile);
            return -errno;
        }
    }

    ret = do_receive(&r, tomnt, receive_fd);

    return ret;
}
int cmd_receive(int argc, char **argv)
{
    char *tomnt = NULL;
    char fromfile[PATH_MAX];
    char realmnt[PATH_MAX];
    struct btrfs_receive r;
    int receive_fd = fileno(stdin);
    u64 max_errors = 1;
    int ret = 0;

    memset(&r, 0, sizeof(r));
    r.mnt_fd = -1;
    r.write_fd = -1;
    r.dest_dir_fd = -1;
    r.dest_dir_chroot = 0;
    realmnt[0] = 0;
    fromfile[0] = 0;

    while (1) {
        int c;
        static const struct option long_opts[] = {
            { "max-errors", required_argument, NULL, 'E' },
            { "chroot", no_argument, NULL, 'C' },
            { NULL, 0, NULL, 0 }
        };

        c = getopt_long(argc, argv, "Cevf:m:", long_opts, NULL);
        if (c < 0)
            break;

        switch (c) {
        case 'v':
            g_verbose++;
            break;
        case 'f':
            if (arg_copy_path(fromfile, optarg, sizeof(fromfile))) {
                error("input file path too long (%zu)", strlen(optarg));
                ret = 1;
                goto out;
            }
            break;
        case 'e':
            r.honor_end_cmd = 1;
            break;
        case 'C':
            r.dest_dir_chroot = 1;
            break;
        case 'E':
            max_errors = arg_strtou64(optarg);
            break;
        case 'm':
            if (arg_copy_path(realmnt, optarg, sizeof(realmnt))) {
                error("mount point path too long (%zu)", strlen(optarg));
                ret = 1;
                goto out;
            }
            break;
        case '?':
        default:
            error("receive args invalid");
            return 1;
        }
    }

    if (check_argc_exact(argc - optind, 1))
        usage(cmd_receive_usage);

    tomnt = argv[optind];

    if (fromfile[0]) {
        receive_fd = open(fromfile, O_RDONLY | O_NOATIME);
        if (receive_fd < 0) {
            error("cannot open %s: %s", fromfile, strerror(errno));
            ret = 1; /* report the open failure instead of silently returning success */
            goto out;
        }
    }

    ret = do_receive(&r, tomnt, realmnt, receive_fd, max_errors);
    if (receive_fd != fileno(stdin))
        close(receive_fd);

out:
    return !!ret;
}
void recv_handler_done(BSocksClient *o, int data_len)
{
    ASSERT(data_len >= 0)
    ASSERT(data_len <= o->control.recv_total - o->control.recv_len)
    DebugObject_Access(&o->d_obj);

    o->control.recv_len += data_len;
    if (o->control.recv_len < o->control.recv_total) {
        do_receive(o);
        return;
    }

    switch (o->state) {
        case STATE_SENT_HELLO: {
            BLog(BLOG_DEBUG, "received hello");

            struct socks_server_hello imsg;
            memcpy(&imsg, o->buffer, sizeof(imsg));

            if (ntoh8(imsg.ver) != SOCKS_VERSION) {
                BLog(BLOG_NOTICE, "wrong version");
                goto fail;
            }

            size_t auth_index;
            for (auth_index = 0; auth_index < o->num_auth_info; auth_index++) {
                if (o->auth_info[auth_index].auth_type == ntoh8(imsg.method)) {
                    break;
                }
            }

            if (auth_index == o->num_auth_info) {
                BLog(BLOG_NOTICE, "server didn't accept any authentication method");
                goto fail;
            }

            const struct BSocksClient_auth_info *ai = &o->auth_info[auth_index];

            switch (ai->auth_type) {
                case SOCKS_METHOD_NO_AUTHENTICATION_REQUIRED: {
                    BLog(BLOG_DEBUG, "no authentication");
                    auth_finished(o);
                } break;

                case SOCKS_METHOD_USERNAME_PASSWORD: {
                    BLog(BLOG_DEBUG, "password authentication");

                    if (ai->password.username_len == 0 || ai->password.username_len > 255 ||
                        ai->password.password_len == 0 || ai->password.password_len > 255) {
                        BLog(BLOG_NOTICE, "invalid username/password length");
                        goto fail;
                    }

                    // allocate password packet
                    bsize_t size = bsize_fromsize(1 + 1 + ai->password.username_len + 1 + ai->password.password_len);
                    if (!reserve_buffer(o, size)) {
                        goto fail;
                    }

                    // write password packet
                    char *ptr = o->buffer;
                    *ptr++ = 1;
                    *ptr++ = ai->password.username_len;
                    memcpy(ptr, ai->password.username, ai->password.username_len);
                    ptr += ai->password.username_len;
                    *ptr++ = ai->password.password_len;
                    memcpy(ptr, ai->password.password, ai->password.password_len);
                    ptr += ai->password.password_len;

                    // start sending
                    PacketPassInterface_Sender_Send(o->control.send_if, (uint8_t *)o->buffer, size.value);

                    // set state
                    o->state = STATE_SENDING_PASSWORD;
                } break;

                default:
                    ASSERT(0);
            }
        } break;

        case STATE_SENT_REQUEST: {
            BLog(BLOG_DEBUG, "received reply header");

            struct socks_reply_header imsg;
            memcpy(&imsg, o->buffer, sizeof(imsg));

            if (ntoh8(imsg.ver) != SOCKS_VERSION) {
                BLog(BLOG_NOTICE, "wrong version");
                goto fail;
            }

            if (ntoh8(imsg.rep) != SOCKS_REP_SUCCEEDED) {
                BLog(BLOG_NOTICE, "reply not successful");
                goto fail;
            }

            int addr_len;
            switch (ntoh8(imsg.atyp)) {
                case SOCKS_ATYP_IPV4:
                    addr_len = sizeof(struct socks_addr_ipv4);
                    break;
                case SOCKS_ATYP_IPV6:
                    addr_len = sizeof(struct socks_addr_ipv6);
                    break;
                default:
                    BLog(BLOG_NOTICE, "reply has unknown address type");
                    goto fail;
            }

            // receive the rest of the reply
            start_receive(o, (uint8_t *)o->buffer + sizeof(imsg), addr_len);

            // set state
            o->state = STATE_RECEIVED_REPLY_HEADER;
        } break;

        case STATE_SENT_PASSWORD: {
            BLog(BLOG_DEBUG, "received password reply");

            if (o->buffer[0] != 1) {
                BLog(BLOG_NOTICE, "password reply has unknown version");
                goto fail;
            }

            if (o->buffer[1] != 0) {
                BLog(BLOG_NOTICE, "password reply is negative");
                goto fail;
            }

            auth_finished(o);
        } break;

        case STATE_RECEIVED_REPLY_HEADER: {
            BLog(BLOG_DEBUG, "received reply rest");

            // free buffer
            BFree(o->buffer);
            o->buffer = NULL;

            // free control I/O
            free_control_io(o);

            // init up I/O
            init_up_io(o);

            // set state
            o->state = STATE_UP;

            // call handler
            o->handler(o->user, BSOCKSCLIENT_EVENT_UP);
            return;
        } break;

        default:
            ASSERT(0);
    }

    return;

fail:
    report_error(o, BSOCKSCLIENT_EVENT_ERROR);
}
int pn_event_handler(void *handler_context, void *conn_context, pn_event_t *event, qd_connection_t *qd_conn)
{
    qd_container_t  *container = (qd_container_t*) handler_context;
    pn_connection_t *conn      = qd_connection_pn(qd_conn);
    pn_session_t    *ssn;
    pn_link_t       *pn_link;
    qd_link_t       *qd_link;
    pn_delivery_t   *delivery;

    switch (pn_event_type(event)) {
    case PN_CONNECTION_REMOTE_OPEN :
        qd_connection_set_user(qd_conn);
        if (pn_connection_state(conn) & PN_LOCAL_UNINIT) {
            // This Open is an externally initiated connection
            // Let policy engine decide
            qd_connection_set_event_stall(qd_conn, true);
            qd_conn->open_container = (void *)container;
            qd_connection_invoke_deferred(qd_conn, qd_policy_amqp_open, qd_conn);
        } else {
            // This Open is in response to an internally initiated connection
            notify_opened(container, qd_conn, conn_context);
        }
        break;

    case PN_CONNECTION_REMOTE_CLOSE :
        if (pn_connection_state(conn) == (PN_LOCAL_ACTIVE | PN_REMOTE_CLOSED))
            pn_connection_close(conn);
        break;

    case PN_SESSION_REMOTE_OPEN :
        if (!(pn_connection_state(conn) & PN_LOCAL_CLOSED)) {
            ssn = pn_event_session(event);
            if (pn_session_state(ssn) & PN_LOCAL_UNINIT) {
                if (qd_conn->policy_settings) {
                    if (!qd_policy_approve_amqp_session(ssn, qd_conn)) {
                        break;
                    }
                    qd_conn->n_sessions++;
                }
                qd_policy_apply_session_settings(ssn, qd_conn);
                pn_session_open(ssn);
            }
        }
        break;

    case PN_SESSION_REMOTE_CLOSE :
        if (!(pn_connection_state(conn) & PN_LOCAL_CLOSED)) {
            ssn = pn_event_session(event);
            if (pn_session_state(ssn) == (PN_LOCAL_ACTIVE | PN_REMOTE_CLOSED)) {
                // remote has nuked our session.  Check for any links that were
                // left open and forcibly detach them, since no detaches will
                // arrive on this session.
                pn_connection_t *conn = pn_session_connection(ssn);
                pn_link_t *pn_link = pn_link_head(conn, PN_LOCAL_ACTIVE | PN_REMOTE_ACTIVE);
                while (pn_link) {
                    if (pn_link_session(pn_link) == ssn) {
                        qd_link_t *qd_link = (qd_link_t*) pn_link_get_context(pn_link);
                        if (qd_link && qd_link->node) {
                            if (qd_conn->policy_settings) {
                                if (qd_link->direction == QD_OUTGOING) {
                                    qd_conn->n_receivers--;
                                    assert(qd_conn->n_receivers >= 0);
                                } else {
                                    qd_conn->n_senders--;
                                    assert(qd_conn->n_senders >= 0);
                                }
                            }
                            qd_log(container->log_source, QD_LOG_NOTICE,
                                   "Aborting link '%s' due to parent session end",
                                   pn_link_name(pn_link));
                            qd_link->node->ntype->link_detach_handler(qd_link->node->context, qd_link, QD_LOST);
                        }
                    }
                    pn_link = pn_link_next(pn_link, PN_LOCAL_ACTIVE | PN_REMOTE_ACTIVE);
                }
                if (qd_conn->policy_settings) {
                    qd_conn->n_sessions--;
                }
                pn_session_close(ssn);
            }
        }
        break;

    case PN_LINK_REMOTE_OPEN :
        if (!(pn_connection_state(conn) & PN_LOCAL_CLOSED)) {
            pn_link = pn_event_link(event);
            if (pn_link_state(pn_link) & PN_LOCAL_UNINIT) {
                if (pn_link_is_sender(pn_link)) {
                    if (qd_conn->policy_settings) {
                        if (!qd_policy_approve_amqp_receiver_link(pn_link, qd_conn)) {
                            break;
                        }
                        qd_conn->n_receivers++;
                    }
                    setup_outgoing_link(container, pn_link);
                } else {
                    if (qd_conn->policy_settings) {
                        if (!qd_policy_approve_amqp_sender_link(pn_link, qd_conn)) {
                            break;
                        }
                        qd_conn->n_senders++;
                    }
                    setup_incoming_link(container, pn_link);
                }
            } else if (pn_link_state(pn_link) & PN_LOCAL_ACTIVE)
                handle_link_open(container, pn_link);
        }
        break;

    case PN_LINK_REMOTE_CLOSE :
    case PN_LINK_REMOTE_DETACH :
        if (!(pn_connection_state(conn) & PN_LOCAL_CLOSED)) {
            pn_link = pn_event_link(event);
            qd_link = (qd_link_t*) pn_link_get_context(pn_link);
            if (qd_link) {
                qd_node_t *node = qd_link->node;
                qd_detach_type_t dt = pn_event_type(event) == PN_LINK_REMOTE_CLOSE ? QD_CLOSED : QD_DETACHED;
                if (node)
                    node->ntype->link_detach_handler(node->context, qd_link, dt);
                else if (qd_link->pn_link == pn_link) {
                    pn_link_close(pn_link);
                }
                if (qd_conn->policy_counted && qd_conn->policy_settings) {
                    if (pn_link_is_sender(pn_link)) {
                        qd_conn->n_receivers--;
                        qd_log(container->log_source, QD_LOG_TRACE,
                               "Closed receiver link %s. n_receivers: %d",
                               pn_link_name(pn_link), qd_conn->n_receivers);
                        assert (qd_conn->n_receivers >= 0);
                    } else {
                        qd_conn->n_senders--;
                        qd_log(container->log_source, QD_LOG_TRACE,
                               "Closed sender link %s. n_senders: %d",
                               pn_link_name(pn_link), qd_conn->n_senders);
                        assert (qd_conn->n_senders >= 0);
                    }
                }
                if (qd_link->close_sess_with_link && qd_link->pn_sess &&
                    pn_link_state(pn_link) == (PN_LOCAL_CLOSED | PN_REMOTE_CLOSED))
                    pn_session_close(qd_link->pn_sess);
            }
        }
        break;

    case PN_LINK_FLOW :
        pn_link = pn_event_link(event);
        qd_link = (qd_link_t*) pn_link_get_context(pn_link);
        if (qd_link && qd_link->node && qd_link->node->ntype->link_flow_handler)
            qd_link->node->ntype->link_flow_handler(qd_link->node->context, qd_link);
        break;

    case PN_DELIVERY :
        delivery = pn_event_delivery(event);
        if (pn_delivery_readable(delivery))
            do_receive(delivery);

        if (pn_delivery_updated(delivery)) {
            do_updated(delivery);
            pn_delivery_clear(delivery);
        }
        break;

    case PN_EVENT_NONE :
    case PN_REACTOR_INIT :
    case PN_REACTOR_QUIESCED :
    case PN_REACTOR_FINAL :
    case PN_TIMER_TASK :
    case PN_CONNECTION_INIT :
    case PN_CONNECTION_BOUND :
    case PN_CONNECTION_UNBOUND :
    case PN_CONNECTION_LOCAL_OPEN :
    case PN_CONNECTION_LOCAL_CLOSE :
    case PN_CONNECTION_FINAL :
    case PN_SESSION_INIT :
    case PN_SESSION_LOCAL_OPEN :
    case PN_SESSION_LOCAL_CLOSE :
    case PN_SESSION_FINAL :
    case PN_LINK_INIT :
    case PN_LINK_LOCAL_OPEN :
    case PN_LINK_LOCAL_CLOSE :
    case PN_LINK_LOCAL_DETACH :
    case PN_LINK_FINAL :
    case PN_TRANSPORT :
    case PN_TRANSPORT_ERROR :
    case PN_TRANSPORT_HEAD_CLOSED :
    case PN_TRANSPORT_TAIL_CLOSED :
    case PN_TRANSPORT_CLOSED :
    case PN_TRANSPORT_AUTHENTICATED :
    case PN_SELECTABLE_INIT :
    case PN_SELECTABLE_UPDATED :
    case PN_SELECTABLE_READABLE :
    case PN_SELECTABLE_WRITABLE :
    case PN_SELECTABLE_ERROR :
    case PN_SELECTABLE_EXPIRED :
    case PN_SELECTABLE_FINAL :
        break;
    }

    return 1;
}
/**
 * This function gets called from within init_game.
 * It is called repeatedly if we are the server, else
 * it is just called once.
 */
void do_gameplay(const int sock, int fire)
{
    int x, y, res, win_status = 0;
    char msg[100];

    for (int i = 0; i < BOARD_SIZE; ++i) {
        for (int j = 0; j < BOARD_SIZE; ++j) {
            player_shots[i][j] = UNTOUCHED;
            peer_shots[i][j] = UNTOUCHED;
        }
    }

    initShips();
    display_boards();

    Ship sh;
    do {
        if (fire == 1) { /* you're the attacker */
            mvwprintw(status_win, 1, 1, "It's your turn! ");
            wrefresh(status_win);
            fire = 0;
            return_cords(&x, &y);
            res = do_fire(sock, x, y);
            place_hit_or_mis(player_win, res, x, y, false);
            switch (res) {
            case 0:
                mvwprintw(status_win, 2, 1, "Missed! ");
                wrefresh(status_win);
                break;
            case 1:
                mvwprintw(status_win, 2, 1, "You hit them! ");
                wrefresh(status_win);
                /* play_boom("You hit them!"); */
                /* display_boards(); */
                break;
            case -1:
                mvwprintw(status_win, 2, 1, "You sunk them! ");
                wrefresh(status_win);
                /* play_boom("You sunk them!"); */
                /* display_boards(); */
                break;
            case -2:
                win_status = 1;
                mvwprintw(status_win, 2, 1, "Game over! ");
                wrefresh(status_win);
                fire = -1;
                break;
            }
        } else { /* you're the defender */
            keypad(stdscr, FALSE);
            curs_set(0); // Set cursor invisible
            mvwprintw(status_win, 1, 1, "Waiting for other player to fire...");
            wrefresh(status_win);
            res = do_receive(sock);
            refresh();
            if (res == 0) {
                //wclear(status_win);
                mvwprintw(status_win, 2, 1, "They missed! ");
                //mvwprintw(status_win,5,1,"It's your turn!");
                wrefresh(status_win);
            } else if (res < 0) { //negative res indicates sunken ship
                sh = getShipById(-1 * res); /* what a hack... */
                //wclear(status_win);
                mvwprintw(status_win, 2, 1, "They sunk your %s! ", sh.name);
                /* play_boom("They sunk you!"); */
                /* display_boards(); */
                //mvwprintw(status_win,5,1,"It's your turn!");
                wrefresh(status_win);
            } else if (res == 100) {
                /* do nothing... the game is over */
            } else {
                sh = getShipById(res);
                //wclear(status_win);
                mvwprintw(status_win, 2, 1, "They hit your %s! ", sh.name);
                /* play_boom("They hit you!"); */
                /* display_boards(); */
                //mvwprintw(status_win,5,1,"It's your turn!");
                wrefresh(status_win);
            }
            mvwprintw(status_win, 1, 1, "It's your turn! ");
            fire = (check_game_over() == 1) ? -1 : 1;
            refresh();
        }
    } while (fire > -1);

    sprintf(msg, "Game over! You %s!\nPress any key to view battlefields.",
            win_status ? "won" : "lost");
    show_message_box(msg);
    getch();
    exchange_shipsets(sock);
    show_battlefields();
}
void start() { do_receive(); }
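// start() above only seeds the event loop with the first asynchronous
// receive; everything else happens in the completion handler.  The class
// below is a minimal Asio-flavoured sketch of the kind of do_receive() such
// a start() typically kicks off (here, a UDP echo server).  The class layout,
// member names and echoing behaviour are assumptions for illustration, not
// the original code behind this snippet.
#include <array>
#include <cstddef>
#include <system_error>
#include <asio.hpp>

class echo_server {
public:
    echo_server(asio::io_context &io, unsigned short port)
        : socket_(io, asio::ip::udp::endpoint(asio::ip::udp::v4(), port)) {}

    void start() { do_receive(); }

private:
    void do_receive()
    {
        // Wait for one datagram, then hand it to do_send(), which echoes it
        // back and re-arms do_receive() for the next one.
        socket_.async_receive_from(
            asio::buffer(data_), sender_,
            [this](std::error_code ec, std::size_t bytes) {
                if (!ec && bytes > 0)
                    do_send(bytes);
                else
                    do_receive();   // ignore the failed datagram, keep listening
            });
    }

    void do_send(std::size_t bytes)
    {
        socket_.async_send_to(
            asio::buffer(data_, bytes), sender_,
            [this](std::error_code, std::size_t) { do_receive(); });
    }

    asio::ip::udp::socket socket_;
    asio::ip::udp::endpoint sender_;
    std::array<char, 1024> data_{};
};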
/**
 * Selects once on sockets for receiving and sending stuff.
 * Monitors:
 *  - the fd exchange pipe, for receiving descriptors to be handled here
 *  - the tcp sockets of all serviced peers, triggering the incoming messages do_receive()
 *  - the send pipes of all serviced peers, triggering the sending of outgoing messages
 * @returns 0 on normal exit or -1 on error
 */
int receive_loop(peer *original_peer)
{
    fd_set rfds, efds;
    struct timeval tv;
    int n, max = 0, cnt = 0;
    AAAMessage *msg = 0;
    serviced_peer_t *sp, *sp2;
    peer *p;
    int fd = -1;
    int fd_exchange_pipe_local = 0;

    if (original_peer)
        fd_exchange_pipe_local = original_peer->fd_exchange_pipe_local;
    else
        fd_exchange_pipe_local = fd_exchange_pipe_unknown_local;

    // if (shutdownx) return -1;

    while (shutdownx && !*shutdownx) {
        n = 0;

        while (!n) {
            if (shutdownx && *shutdownx)
                break;
            cfg_update();

            log_serviced_peers();

            max = -1;

            FD_ZERO(&rfds);
            FD_ZERO(&efds);

            FD_SET(fd_exchange_pipe_local, &rfds);
            if (fd_exchange_pipe_local > max)
                max = fd_exchange_pipe_local;

            for (sp = serviced_peers; sp; sp = sp->next) {
                if (sp->tcp_socket >= 0) {
                    FD_SET(sp->tcp_socket, &rfds);
                    FD_SET(sp->tcp_socket, &efds);
                    if (sp->tcp_socket > max)
                        max = sp->tcp_socket;
                }
                if (sp->send_pipe_fd >= 0) {
                    FD_SET(sp->send_pipe_fd, &rfds);
                    if (sp->send_pipe_fd > max)
                        max = sp->send_pipe_fd;
                }
            }

            tv.tv_sec = 1;
            tv.tv_usec = 0;

            n = select(max + 1, &rfds, 0, &efds, &tv);
            if (n == -1) {
                if (shutdownx && *shutdownx)
                    return 0;
                LM_ERR("select_recv(): %s\n", strerror(errno));
                for (sp = serviced_peers; sp; sp = sp2) {
                    sp2 = sp->next;
                    disconnect_serviced_peer(sp, 0);
                    if (sp->p && sp->p->is_dynamic)
                        drop_serviced_peer(sp, 0);
                }
                sleep(1);
                break;
            } else if (n) {
                if (FD_ISSET(fd_exchange_pipe_local, &rfds)) {
                    /* fd exchange */
                    LM_DBG("select_recv(): There is something on the fd exchange pipe\n");
                    p = 0;
                    fd = -1;
                    if (!receive_fd(fd_exchange_pipe_local, &fd, &p)) {
                        LM_ERR("select_recv(): Error reading from fd exchange pipe\n");
                    } else {
                        LM_DBG("select_recv(): fd exchange pipe says fd [%d] for peer %p:[%.*s]\n", fd,
                               p,
                               p ? p->fqdn.len : 0,
                               p ? p->fqdn.s : 0);
                        if (p) {
                            sp2 = 0;
                            for (sp = serviced_peers; sp; sp = sp->next)
                                if (sp->p == p) {
                                    sp2 = sp;
                                    break;
                                }
                            if (!sp2)
                                sp2 = add_serviced_peer(p);
                            else
                                make_send_pipe(sp2);
                            if (!sp2) {
                                LM_ERR("Error on add_serviced_peer()\n");
                                continue;
                            }
                            sp2->tcp_socket = fd;
                            if (p->state == Wait_Conn_Ack) {
                                p->I_sock = fd;
                                sm_process(p, I_Rcv_Conn_Ack, 0, 0, fd);
                            } else {
                                p->R_sock = fd;
                            }
                        } else {
                            sp2 = add_serviced_peer(NULL);
                            if (!sp2) {
                                LM_ERR("Error on add_serviced_peer()\n");
                                continue;
                            }
                            sp2->tcp_socket = fd;
                        }
                    }
                }

                for (sp = serviced_peers; sp;) {
                    if (sp->tcp_socket >= 0 && FD_ISSET(sp->tcp_socket, &efds)) {
                        LM_INFO("select_recv(): [%.*s] Peer socket [%d] found on the exception list... dropping\n",
                                sp->p ? sp->p->fqdn.len : 0,
                                sp->p ? sp->p->fqdn.s : 0,
                                sp->tcp_socket);
                        goto drop_peer;
                    }
                    if (sp->send_pipe_fd >= 0 && FD_ISSET(sp->send_pipe_fd, &rfds)) {
                        /* send */
                        LM_DBG("select_recv(): There is something on the send pipe\n");
                        cnt = read(sp->send_pipe_fd, &msg, sizeof(AAAMessage *));
                        if (cnt == 0) {
                            //This is very stupid and might not work well - dropped messages... to be fixed
                            LM_INFO("select_recv(): ReOpening pipe for read. This should not happen...\n");
                            close(sp->send_pipe_fd);
                            sp->send_pipe_fd = open(sp->send_pipe_name.s, O_RDONLY | O_NDELAY);
                            goto receive;
                        }
                        if (cnt < sizeof(AAAMessage *)) {
                            if (cnt < 0)
                                LM_ERR("select_recv(): Error reading from send pipe\n");
                            goto receive;
                        }
                        LM_DBG("select_recv(): Send pipe says [%p] %d\n", msg, cnt);
                        if (sp->tcp_socket < 0) {
                            LM_ERR("select_recv(): got a signal to send something, but the connection was not opened");
                        } else {
                            while ((cnt = write(sp->tcp_socket, msg->buf.s, msg->buf.len)) == -1) {
                                if (errno == EINTR)
                                    continue;
                                LM_ERR("select_recv(): [%.*s] write on socket [%d] returned error> %s... dropping\n",
                                       sp->p ? sp->p->fqdn.len : 0,
                                       sp->p ? sp->p->fqdn.s : 0,
                                       sp->tcp_socket,
                                       strerror(errno));
                                AAAFreeMessage(&msg);
                                close(sp->tcp_socket);
                                goto drop_peer;
                            }
                            if (cnt != msg->buf.len) {
                                LM_ERR("select_recv(): [%.*s] write on socket [%d] only wrote %d/%d bytes... dropping\n",
                                       sp->p ? sp->p->fqdn.len : 0,
                                       sp->p ? sp->p->fqdn.s : 0,
                                       sp->tcp_socket,
                                       cnt,
                                       msg->buf.len);
                                AAAFreeMessage(&msg);
                                close(sp->tcp_socket);
                                goto drop_peer;
                            }
                        }
                        AAAFreeMessage(&msg);
                        //don't return, maybe there is something to read
                    }
receive:
                    /* receive */
                    if (sp->tcp_socket >= 0 && FD_ISSET(sp->tcp_socket, &rfds)) {
                        errno = 0;
                        cnt = do_receive(sp);
                        if (cnt <= 0) {
                            LM_INFO("select_recv(): [%.*s] read on socket [%d] returned %d > %s... dropping\n",
                                    sp->p ? sp->p->fqdn.len : 0,
                                    sp->p ? sp->p->fqdn.s : 0,
                                    sp->tcp_socket,
                                    cnt,
                                    errno ? strerror(errno) : "");
                            goto drop_peer;
                        }
                    }

//next_sp:
                    /* go to next serviced peer */
                    sp = sp->next;
                    continue;

drop_peer:
                    /* drop this serviced peer on error */
                    sp2 = sp->next;
                    disconnect_serviced_peer(sp, 0);
                    if (sp->p && sp->p->is_dynamic)
                        drop_serviced_peer(sp, 0);
                    sp = sp2;
                }
            }
        }
    }
    return 0;
}