/* void response_client_open_ex(response_client_t *uclient, char *conn_str, char *(*id_f)(), char *(*pph_f)()) { uclient->id_f = id_f; uclient->passphrase_f = pph_f; response_client_open_int(uclient, conn_str); } */

/*
 * Build a request from `request`, optionally wrap it in the encrypted
 * LVC envelope (id, timestamp, OTP challenge, ciphertext), and send it
 * to the server address stored in the client.
 *
 * Returns the number of bytes handed to pj_sock_sendto() on success,
 * or -1 on send failure.
 */
int response_client_send(response_client_t *uclient, response_request_t *request)
{
    int ret;
    long nbytes;
    char buff[UCLIENT_BUFSIZE];
#if defined(USERVER_ENCRYPTED)
    char cipher[UCLIENT_BUFSIZE];
    char message[UCLIENT_BUFSIZE];
    uint32_t timestamp;
    char sts[32];
    char *passphrase = uclient->passphrase_f();
    char otp[100];
    char *id = uclient->id_f();
    char challenge[32];
    int len = 32;
    int len1 = 32;
    lvc_t lvc;

    /* Derive a one-time pad from the passphrase and the current timestamp,
     * then encrypt the timestamp string itself as the challenge. */
    timestamp = get_ts();
    len1 = ts2str(timestamp, sts);
    generate_otp(otp, passphrase, sts);
    do_encrypt(challenge, &len, sts, len1, otp);

    /* Envelope layout: [id][timestamp][challenge][ciphertext] */
    lvc_init(&lvc, message, UCLIENT_BUFSIZE);
    fprintf(stdout, "lvc_pack id:%s\n", id);
    lvc_pack(&lvc, strlen(id), id);
    fprintf(stdout, "lvc_pack ts:%u\n", (unsigned)timestamp);
    /* BUGFIX: was garbled "(char *)&times;tamp" (HTML-entity corruption of
     * "&timestamp"); pack the raw 4-byte timestamp. */
    lvc_pack(&lvc, sizeof(uint32_t), (char *)&timestamp);
    fprintf(stdout, "lvc_pack challenge:%d\n", len);
    lvc_pack(&lvc, len, challenge);
#endif

    response_build_request(buff, sizeof(buff), request);
    nbytes = strlen(buff);

#if defined(USERVER_ENCRYPTED)
    /* BUGFIX: "%.*s" requires an int precision argument; nbytes is long. */
    fprintf(stdout, "Message to send:%.*s\n", (int)nbytes, buff);
    len = sizeof(cipher);
    do_encrypt(cipher, &len, buff, nbytes, otp);
    lvc_pack(&lvc, len, cipher);
    lvc_pack_finish(&lvc);
    nbytes = lvc.len;
    ret = pj_sock_sendto(uclient->fd, lvc.data, &nbytes, 0,
                         (const pj_sockaddr_t *)uclient->connect_data,
                         sizeof(pj_sockaddr_in));
#else
    ret = pj_sock_sendto(uclient->fd, buff, &nbytes, 0,
                         (const pj_sockaddr_t *)uclient->connect_data,
                         sizeof(pj_sockaddr_in));
#endif

    if (ret != 0) {
        PERROR_IF_TRUE(1, "Error in sending data\n");
        return -1;
    }

    /* pj_sock_sendto() updates nbytes to the count actually sent. */
    return (int)nbytes;
}
/*
 * Send data on a (presumably connected) socket.
 *
 * Thin wrapper: sending without an explicit destination is just
 * pj_sock_sendto() with a NULL address and zero address length.
 * *len is in/out — on input the number of bytes to send, on output
 * the number of bytes actually sent (as used by callers in this file).
 */
PJ_DEF(pj_status_t) pj_sock_send( pj_sock_t sockfd,
                                  const void *buf,
                                  pj_ssize_t *len,
                                  unsigned flags)
{
    return pj_sock_sendto(sockfd, buf, len, flags, NULL, 0);
}
int main() { pj_sock_t sock; pj_sockaddr_in to_addr; char *s = "Cong hoa xa hoi chu nghia VietNam"; char buffer[100]; pj_ssize_t len; struct timeval tv_begin, tv_end, tv_diff; pj_log_set_level(3); CHECK(__FILE__, pj_init()); pj_bzero(buffer, sizeof(buffer)); CHECK_R( __FILE__, pj_sock_socket(PJ_AF_INET, PJ_SOCK_DGRAM, 0, &sock) ); //udp_socket(12345, &sock); setup_addr_with_host_and_port(&to_addr, "127.0.0.1", 33333); len = strlen(s); gettimeofday(&tv_begin, NULL); pj_sock_sendto(sock, s, &len, 0, &to_addr, sizeof(to_addr)); PJ_LOG(3, (__FILE__, "Sent: %s", s)); len = 100; pj_sock_recv(sock, buffer, &len, 0); gettimeofday(&tv_end, NULL); int diff = timeval_subtract(&tv_diff, &tv_end, &tv_begin); PJ_LOG(3, (__FILE__, "Received: %s %0.2f msec", buffer, diff*1.0/1000)); pj_shutdown(); return 0; }
/*
 * Transport callback: transmit a packet through the TURN listener's
 * UDP socket to the given destination.
 */
static pj_status_t udp_sendto(pj_turn_transport *tp,
                              const void *packet,
                              pj_size_t size,
                              unsigned flag,
                              const pj_sockaddr_t *addr,
                              int addr_len)
{
    pj_ssize_t nbytes = size;
    return pj_sock_sendto(tp->listener->sock, packet, &nbytes, flag,
                          addr, addr_len);
}
static int udp_sendto(int fd, char *buff, int len, void *data, unsigned int data_len) { int ret = 0; long nbytes = len; ret = pj_sock_sendto(fd, buff, &nbytes, 0, (pj_sockaddr_t *)data, data_len); if( ret != 0 ) { PERROR_IF_TRUE(1, "Error in sending data\n"); return -1; } return nbytes; }
// // sendto() // pj_ssize_t sendto( const void *buf, pj_size_t len, int flag, const Pj_Inet_Addr &addr) { pj_ssize_t bytes = len; if (pj_sock_sendto( sock_, buf, &bytes, flag, &addr, sizeof(pj_sockaddr_in)) != PJ_SUCCESS) { return -1; } return bytes; }
/*
 * STUN session callback: ship an encoded message to the destination
 * through the (file-scope) server's socket. The session and token
 * arguments are part of the callback signature but unused here.
 */
static pj_status_t server_send_msg(pj_stun_session *sess, void *token,
                                   const void *pkt, pj_size_t pkt_size,
                                   const pj_sockaddr_t *dst_addr,
                                   unsigned addr_len)
{
    pj_ssize_t size = pkt_size;

    PJ_UNUSED_ARG(sess);
    PJ_UNUSED_ARG(token);

    return pj_sock_sendto(server->sock, pkt, &size, 0, dst_addr, addr_len);
}
/*
 * Legacy (select-based) ioqueue sendto: attempt the datagram send
 * immediately, then record a SEND_TO operation on the key and arm the
 * write fdset so the completion callback fires on the next poll.
 *
 * NOTE(review): pj_sock_sendto() is invoked here with `datalen` by value
 * and its return compared to the byte count — this matches an older
 * pj_sock_sendto() that returns bytes sent, not the pj_status_t /
 * pj_ssize_t* variant used elsewhere in this file. Confirm which API
 * revision this translation unit is built against.
 */
PJ_DEF(int) pj_ioqueue_sendto( pj_ioqueue_t *ioque, pj_ioqueue_key_t *key,
                               const void *data, pj_size_t datalen,
                               const pj_sockaddr_t *addr, int addrlen)
{
    /* Short or failed send -> report error to caller immediately. */
    if (pj_sock_sendto(key->fd, data, datalen, 0, addr, addrlen) !=
        (pj_ssize_t)datalen)
        return -1;

    /* Register the pending SEND_TO so the poll loop reports completion.
     * wr_buf is NULL because the data has already gone out above. */
    pj_mutex_lock(ioque->mutex);
    key->op |= PJ_IOQUEUE_OP_SEND_TO;
    key->wr_buf = NULL;
    key->wr_buflen = datalen;
    PJ_FD_SET(key->fd, &ioque->wfdset);
    pj_mutex_unlock(ioque->mutex);

    return PJ_IOQUEUE_PENDING;
}
static int worker_thread(void *arg) { pj_sock_t sock = (pj_sock_t)arg; char buf[512]; pj_status_t last_recv_err = PJ_SUCCESS, last_write_err = PJ_SUCCESS; while (!thread_quit_flag) { pj_ssize_t len; pj_status_t rc; pj_sockaddr_in addr; int addrlen; len = sizeof(buf); addrlen = sizeof(addr); rc = pj_sock_recvfrom(sock, buf, &len, 0, &addr, &addrlen); if (rc != 0) { if (rc != last_recv_err) { app_perror("...recv error", rc); last_recv_err = rc; } continue; } pj_atomic_add(total_bytes, (pj_atomic_value_t)len); rc = pj_sock_sendto(sock, buf, &len, 0, &addr, addrlen); if (rc != PJ_SUCCESS) { if (rc != last_write_err) { app_perror("...send error", rc); last_write_err = rc; } continue; } } return 0; }
/*
 * Exchange a small fixed-size payload and then a large payload between a
 * client socket (cs) and server socket (ss), verifying that everything
 * arrives intact.
 *
 * If dstaddr is non-NULL the client uses sendto(); otherwise send() on a
 * connected socket. If srcaddr is non-NULL the server uses recvfrom() and
 * verifies the peer address. For stream sockets, recv() is repeated until
 * the full payload is assembled.
 *
 * Returns 0 on success, or a negative test-specific error code.
 *
 * BUGFIX: the two "socket has closed!" PJ_LOG format strings were split by
 * raw newlines (invalid C string literals); they are rejoined here. Format
 * arguments of type pj_ssize_t are now cast to match %d/%u specifiers.
 */
static int send_recv_test(int sock_type,
                          pj_sock_t ss, pj_sock_t cs,
                          pj_sockaddr_in *dstaddr, pj_sockaddr_in *srcaddr,
                          int addrlen)
{
    enum { DATA_LEN = 16 };
    char senddata[DATA_LEN+4], recvdata[DATA_LEN+4];
    pj_ssize_t sent, received, total_received;
    pj_status_t rc;

    TRACE_(("test", "....create_random_string()"));
    pj_create_random_string(senddata, DATA_LEN);
    senddata[DATA_LEN-1] = '\0';

    /*
     * Test send/recv small data.
     */
    TRACE_(("test", "....sendto()"));
    if (dstaddr) {
        sent = DATA_LEN;
        rc = pj_sock_sendto(cs, senddata, &sent, 0, dstaddr, addrlen);
        if (rc != PJ_SUCCESS || sent != DATA_LEN) {
            app_perror("...sendto error", rc);
            rc = -140; goto on_error;
        }
    } else {
        sent = DATA_LEN;
        rc = pj_sock_send(cs, senddata, &sent, 0);
        if (rc != PJ_SUCCESS || sent != DATA_LEN) {
            app_perror("...send error", rc);
            rc = -145; goto on_error;
        }
    }

    TRACE_(("test", "....recv()"));
    if (srcaddr) {
        pj_sockaddr_in addr;
        int srclen = sizeof(addr);

        pj_bzero(&addr, sizeof(addr));

        received = DATA_LEN;
        rc = pj_sock_recvfrom(ss, recvdata, &received, 0, &addr, &srclen);
        if (rc != PJ_SUCCESS || received != DATA_LEN) {
            app_perror("...recvfrom error", rc);
            rc = -150; goto on_error;
        }
        if (srclen != addrlen)
            return -151;
        /* The reported peer must be the client's own address. */
        if (pj_sockaddr_cmp(&addr, srcaddr) != 0) {
            char srcaddr_str[32], addr_str[32];
            strcpy(srcaddr_str, pj_inet_ntoa(srcaddr->sin_addr));
            strcpy(addr_str, pj_inet_ntoa(addr.sin_addr));
            PJ_LOG(3,("test", "...error: src address mismatch (original=%s, "
                              "recvfrom addr=%s)",
                              srcaddr_str, addr_str));
            return -152;
        }
    } else {
        /* Repeat recv() until all data is received.
         * This applies only for non-UDP of course, since for UDP
         * we would expect all data to be received in one packet.
         */
        total_received = 0;
        do {
            received = DATA_LEN-total_received;
            rc = pj_sock_recv(ss, recvdata+total_received, &received, 0);
            if (rc != PJ_SUCCESS) {
                app_perror("...recv error", rc);
                rc = -155; goto on_error;
            }
            if (received <= 0) {
                PJ_LOG(3,("", "...error: socket has closed! (received=%d)",
                          (int)received));
                rc = -156; goto on_error;
            }
            if (received != DATA_LEN-total_received) {
                if (sock_type != pj_SOCK_STREAM()) {
                    PJ_LOG(3,("", "...error: expecting %u bytes, got %u bytes",
                              (unsigned)(DATA_LEN-total_received),
                              (unsigned)received));
                    rc = -157; goto on_error;
                }
            }
            total_received += received;
        } while (total_received < DATA_LEN);
    }

    TRACE_(("test", "....memcmp()"));
    if (pj_memcmp(senddata, recvdata, DATA_LEN) != 0) {
        PJ_LOG(3,("","...error: received data mismatch "
                     "(got:'%s' expecting:'%s'", recvdata, senddata));
        rc = -160; goto on_error;
    }

    /*
     * Test send/recv big data.
     */
    TRACE_(("test", "....sendto()"));
    if (dstaddr) {
        sent = BIG_DATA_LEN;
        rc = pj_sock_sendto(cs, bigdata, &sent, 0, dstaddr, addrlen);
        if (rc != PJ_SUCCESS || sent != BIG_DATA_LEN) {
            app_perror("...sendto error", rc);
            rc = -161; goto on_error;
        }
    } else {
        sent = BIG_DATA_LEN;
        rc = pj_sock_send(cs, bigdata, &sent, 0);
        if (rc != PJ_SUCCESS || sent != BIG_DATA_LEN) {
            app_perror("...send error", rc);
            rc = -165; goto on_error;
        }
    }

    TRACE_(("test", "....recv()"));

    /* Repeat recv() until all data is received.
     * This applies only for non-UDP of course, since for UDP
     * we would expect all data to be received in one packet.
     */
    total_received = 0;
    do {
        received = BIG_DATA_LEN-total_received;
        rc = pj_sock_recv(ss, bigbuffer+total_received, &received, 0);
        if (rc != PJ_SUCCESS) {
            app_perror("...recv error", rc);
            rc = -170; goto on_error;
        }
        if (received <= 0) {
            PJ_LOG(3,("", "...error: socket has closed! (received=%d)",
                      (int)received));
            rc = -173; goto on_error;
        }
        if (received != BIG_DATA_LEN-total_received) {
            if (sock_type != pj_SOCK_STREAM()) {
                PJ_LOG(3,("", "...error: expecting %u bytes, got %u bytes",
                          (unsigned)(BIG_DATA_LEN-total_received),
                          (unsigned)received));
                rc = -176; goto on_error;
            }
        }
        total_received += received;
    } while (total_received < BIG_DATA_LEN);

    TRACE_(("test", "....memcmp()"));
    if (pj_memcmp(bigdata, bigbuffer, BIG_DATA_LEN) != 0) {
        PJ_LOG(3,("", "...error: received data has been altered!"));
        rc = -180; goto on_error;
    }

    rc = 0;

on_error:
    return rc;
}
/* * select_test() * * Test main entry. */ int select_test() { pj_sock_t udp1=PJ_INVALID_SOCKET, udp2=PJ_INVALID_SOCKET; pj_sockaddr_in udp_addr; int status; int setcount[3]; pj_str_t s; const char data[] = "hello"; const int datalen = 5; pj_ssize_t sent, received; char buf[10]; pj_status_t rc; PJ_LOG(3, (THIS_FILE, "...Testing simple UDP select()")); // Create two UDP sockets. rc = pj_sock_socket( pj_AF_INET(), pj_SOCK_DGRAM(), 0, &udp1); if (rc != PJ_SUCCESS) { app_perror("...error: unable to create socket", rc); status=-10; goto on_return; } rc = pj_sock_socket( pj_AF_INET(), pj_SOCK_DGRAM(), 0, &udp2); if (udp2 == PJ_INVALID_SOCKET) { app_perror("...error: unable to create socket", rc); status=-20; goto on_return; } // Bind one of the UDP socket. pj_bzero(&udp_addr, sizeof(udp_addr)); udp_addr.sin_family = pj_AF_INET(); udp_addr.sin_port = UDP_PORT; udp_addr.sin_addr = pj_inet_addr(pj_cstr(&s, "127.0.0.1")); if (pj_sock_bind(udp2, &udp_addr, sizeof(udp_addr))) { status=-30; goto on_return; } // Send data. sent = datalen; rc = pj_sock_sendto(udp1, data, &sent, 0, &udp_addr, sizeof(udp_addr)); if (rc != PJ_SUCCESS || sent != datalen) { app_perror("...error: sendto() error", rc); status=-40; goto on_return; } // Sleep a bit. See http://trac.pjsip.org/repos/ticket/890 pj_thread_sleep(10); // Check that socket is marked as reable. // Note that select() may also report that sockets are writable. 
status = do_select(udp1, udp2, setcount); if (status < 0) { char errbuf[128]; pj_strerror(pj_get_netos_error(), errbuf, sizeof(errbuf)); PJ_LOG(1,(THIS_FILE, "...error: %s", errbuf)); status=-50; goto on_return; } if (status == 0) { status=-60; goto on_return; } if (setcount[READ_FDS] != 1) { status=-70; goto on_return; } if (setcount[WRITE_FDS] != 0) { if (setcount[WRITE_FDS] == 2) { PJ_LOG(3,(THIS_FILE, "...info: system reports writable sockets")); } else { status=-80; goto on_return; } } else { PJ_LOG(3,(THIS_FILE, "...info: system doesn't report writable sockets")); } if (setcount[EXCEPT_FDS] != 0) { status=-90; goto on_return; } // Read the socket to clear readable sockets. received = sizeof(buf); rc = pj_sock_recv(udp2, buf, &received, 0); if (rc != PJ_SUCCESS || received != 5) { status=-100; goto on_return; } status = 0; // Test timeout on the read part. // This won't necessarily return zero, as select() may report that // sockets are writable. setcount[0] = setcount[1] = setcount[2] = 0; status = do_select(udp1, udp2, setcount); if (status != 0 && status != setcount[WRITE_FDS]) { PJ_LOG(3,(THIS_FILE, "...error: expecting timeout but got %d sks set", status)); PJ_LOG(3,(THIS_FILE, " rdset: %d, wrset: %d, exset: %d", setcount[0], setcount[1], setcount[2])); status = -110; goto on_return; } if (setcount[READ_FDS] != 0) { PJ_LOG(3,(THIS_FILE, "...error: readable socket not expected")); status = -120; goto on_return; } status = 0; on_return: if (udp1 != PJ_INVALID_SOCKET) pj_sock_close(udp1); if (udp2 != PJ_INVALID_SOCKET) pj_sock_close(udp2); return status; }
/*
 * Resolve the server-reflexive (mapped) address of each socket in sock[]
 * by sending STUN Binding Requests to two STUN servers and comparing the
 * MAPPED-ADDRESS each one reports.
 *
 * For every socket, requests go to both servers with the transaction id
 * tagged (tsx[2]=socket index, tsx[3]=server index) so responses can be
 * matched. Requests are retransmitted up to MAX_REQUEST times with the
 * stun_timer[] backoff. If both servers report the same mapping it is
 * stored in mapped_addr[i]; differing mappings mean a symmetric NAT
 * (PJLIB_UTIL_ESTUNSYMMETRIC), missing replies PJLIB_UTIL_ESTUNNOTRESPOND.
 */
PJ_DECL(pj_status_t) pjstun_get_mapped_addr( pj_pool_factory *pf,
                                             int sock_cnt, pj_sock_t sock[],
                                             const pj_str_t *srv1, int port1,
                                             const pj_str_t *srv2, int port2,
                                             pj_sockaddr_in mapped_addr[])
{
    pj_sockaddr_in srv_addr[2];
    int i, j, send_cnt = 0;
    pj_pool_t *pool;
    /* Per-socket record of the mapping each of the two servers reported. */
    struct {
        struct {
            pj_uint32_t mapped_addr;
            pj_uint32_t mapped_port;
        } srv[2];
    } *rec;
    void       *out_msg;
    pj_size_t   out_msg_len;
    int wait_resp = 0;
    pj_status_t status;

    PJ_CHECK_STACK();

    /* Create pool. */
    pool = pj_pool_create(pf, "stun%p", 1024, 1024, NULL);
    if (!pool) return PJ_ENOMEM;

    /* Allocate client records */
    rec = pj_pool_calloc(pool, sock_cnt, sizeof(*rec));
    if (!rec) {
        status = PJ_ENOMEM;
        goto on_error;
    }

    /* Create the outgoing BIND REQUEST message template */
    status = pjstun_create_bind_req( pool, &out_msg, &out_msg_len,
                                     pj_rand(), pj_rand());
    if (status != PJ_SUCCESS)
        goto on_error;

    /* Resolve servers. */
    status = pj_sockaddr_in_init(&srv_addr[0], srv1, (pj_uint16_t)port1);
    if (status != PJ_SUCCESS)
        goto on_error;

    status = pj_sockaddr_in_init(&srv_addr[1], srv2, (pj_uint16_t)port2);
    if (status != PJ_SUCCESS)
        goto on_error;

    /* Init mapped addresses to zero */
    pj_memset(mapped_addr, 0, sock_cnt * sizeof(pj_sockaddr_in));

    /* Main retransmission loop. */
    for (send_cnt=0; send_cnt<MAX_REQUEST; ++send_cnt) {
        pj_time_val next_tx, now;
        pj_fd_set_t r;
        int select_rc;

        PJ_FD_ZERO(&r);

        /* Send messages to servers that has not given us response. */
        for (i=0; i<sock_cnt && status==PJ_SUCCESS; ++i) {
            for (j=0; j<2 && status==PJ_SUCCESS; ++j) {
                pjstun_msg_hdr *msg_hdr = out_msg;
                pj_ssize_t sent_len;

                /* A non-zero mapped_port means this (socket,server) pair
                 * already has its answer. */
                if (rec[i].srv[j].mapped_port != 0)
                    continue;

                /* Modify message so that we can distinguish response. */
                msg_hdr->tsx[2] = pj_htonl(i);
                msg_hdr->tsx[3] = pj_htonl(j);

                /* Send! */
                sent_len = out_msg_len;
                status = pj_sock_sendto(sock[i], out_msg, &sent_len, 0,
                                        (pj_sockaddr_t*)&srv_addr[j],
                                        sizeof(pj_sockaddr_in));
                if (status == PJ_SUCCESS)
                    ++wait_resp;
            }
        }

        /* All requests sent.
         * The loop below will wait for responses until all responses have
         * been received (i.e. wait_resp==0) or timeout occurs, which then
         * we'll go to the next retransmission iteration.
         */

        /* Calculate time of next retransmission. */
        pj_gettimeofday(&next_tx);
        next_tx.sec += (stun_timer[send_cnt]/1000);
        next_tx.msec += (stun_timer[send_cnt]%1000);
        pj_time_val_normalize(&next_tx);

        for (pj_gettimeofday(&now), select_rc=1;
             status==PJ_SUCCESS && select_rc==1 && wait_resp>0 &&
             PJ_TIME_VAL_LT(now, next_tx);
             pj_gettimeofday(&now))
        {
            pj_time_val timeout;

            /* Remaining time until the next retransmission is due. */
            timeout = next_tx;
            PJ_TIME_VAL_SUB(timeout, now);

            for (i=0; i<sock_cnt; ++i) {
                PJ_FD_SET(sock[i], &r);
            }

            select_rc = pj_sock_select(FD_SETSIZE, &r, NULL, NULL, &timeout);
            if (select_rc < 1)
                continue;

            /* Drain every readable socket and record matched responses. */
            for (i=0; i<sock_cnt; ++i) {
                int sock_idx, srv_idx;
                pj_ssize_t len;
                pjstun_msg msg;
                pj_sockaddr_in addr;
                int addrlen = sizeof(addr);
                pjstun_mapped_addr_attr *attr;
                char recv_buf[128];

                if (!PJ_FD_ISSET(sock[i], &r))
                    continue;

                len = sizeof(recv_buf);
                status = pj_sock_recvfrom( sock[i], recv_buf, &len, 0,
                                           (pj_sockaddr_t*)&addr, &addrlen);

                /* NOTE(review): wait_resp is decremented even when the
                 * recvfrom/parse below fails — confirm this accounting is
                 * intentional (a failed read still "consumes" a response). */
                --wait_resp;

                if (status != PJ_SUCCESS)
                    continue;

                status = pjstun_parse_msg(recv_buf, len, &msg);
                if (status != PJ_SUCCESS) {
                    continue;
                }

                /* Recover which (socket,server) pair this response answers
                 * from the tagged transaction id. */
                sock_idx = pj_ntohl(msg.hdr->tsx[2]);
                srv_idx = pj_ntohl(msg.hdr->tsx[3]);

                if (sock_idx<0 || sock_idx>=sock_cnt ||
                    srv_idx<0 || srv_idx>=2)
                {
                    status = PJLIB_UTIL_ESTUNININDEX;
                    continue;
                }

                if (pj_ntohs(msg.hdr->type) != PJSTUN_BINDING_RESPONSE) {
                    status = PJLIB_UTIL_ESTUNNOBINDRES;
                    continue;
                }

                if (pjstun_msg_find_attr(&msg, PJSTUN_ATTR_ERROR_CODE) != NULL) {
                    status = PJLIB_UTIL_ESTUNRECVERRATTR;
                    continue;
                }

                attr = (void*)pjstun_msg_find_attr(&msg, PJSTUN_ATTR_MAPPED_ADDR);
                if (!attr) {
                    status = PJLIB_UTIL_ESTUNNOMAP;
                    continue;
                }

                rec[sock_idx].srv[srv_idx].mapped_addr = attr->addr;
                rec[sock_idx].srv[srv_idx].mapped_port = attr->port;
            }
        }

        /* The best scenario is if all requests have been replied.
         * Then we don't need to go to the next retransmission iteration.
         */
        if (wait_resp <= 0)
            break;
    }

    /* Both servers must agree on the mapping, and it must be non-zero,
     * otherwise the NAT is symmetric or a server never responded. */
    for (i=0; i<sock_cnt && status==PJ_SUCCESS; ++i) {
        if (rec[i].srv[0].mapped_addr == rec[i].srv[1].mapped_addr &&
            rec[i].srv[0].mapped_port == rec[i].srv[1].mapped_port)
        {
            mapped_addr[i].sin_family = PJ_AF_INET;
            mapped_addr[i].sin_addr.s_addr = rec[i].srv[0].mapped_addr;
            mapped_addr[i].sin_port = (pj_uint16_t)rec[i].srv[0].mapped_port;

            if (rec[i].srv[0].mapped_addr == 0 ||
                rec[i].srv[0].mapped_port == 0)
            {
                status = PJLIB_UTIL_ESTUNNOTRESPOND;
                break;
            }
        } else {
            status = PJLIB_UTIL_ESTUNSYMMETRIC;
            break;
        }
    }

    pj_pool_release(pool);
    return status;

on_error:
    if (pool) pj_pool_release(pool);
    return status;
}
/*
 * Worker loop for the duplex-socket helper: multiplex one UDP socket for
 * both reading and writing with pj_sock_select().
 *
 * Each iteration: pulse the event if someone is waiting (wait_cnt
 * handshake), rebuild the fd sets (the write set is armed only while
 * to_send is pending), then service readability (recvfrom + recv_callback)
 * and writability (sendto the unsent tail of to_send; fire send_callback
 * and clear to_send once fully transmitted).
 *
 * The loop keeps running after b_quit is raised until all waiters are
 * released and any pending send has drained; on exit it destroys the
 * event and closes the socket.
 */
static int thread_proc(void *data)
{
    dupsock_t *p_dupsock = (dupsock_t *)data;
    pj_thread_t *thread;
    pj_thread_desc desc;
    struct pj_time_val tv;
    int ret;
    pj_ssize_t ntemp;
    int sock_len;

    // pj_bzero(desc, sizeof(desc));
    // CHECK(__FILE__, pj_thread_register("dupsock", desc, &thread));

    PJ_FD_ZERO(&(p_dupsock->rfds));
    PJ_FD_ZERO(&(p_dupsock->wfds));
    PJ_FD_SET(*(p_dupsock->p_sock), &(p_dupsock->rfds));

    /* 20 ms select timeout so the quit flag is polled regularly. */
    tv.sec = 0;
    tv.msec = 20;

    // MAIN LOOP
    p_dupsock->b_quit = 0;
    while( (!p_dupsock->b_quit) || (p_dupsock->wait_cnt > 0) ||
           (p_dupsock->to_send != NULL) )
    {
        /* Release one waiter per iteration via the event handshake. */
        if(p_dupsock->wait_cnt > 0) {
            p_dupsock->wait_cnt--;
            pj_event_pulse(p_dupsock->p_event);
        }

        pj_thread_sleep(10);

        /* Rebuild fd sets every iteration; select() mutates them. */
        PJ_FD_ZERO(&(p_dupsock->rfds));
        PJ_FD_ZERO(&(p_dupsock->wfds));
        PJ_FD_SET(*(p_dupsock->p_sock), &(p_dupsock->rfds));
        if(p_dupsock->to_send != NULL)
            PJ_FD_SET(*(p_dupsock->p_sock), &(p_dupsock->wfds));

        ret = pj_sock_select(*(p_dupsock->p_sock) + 1, &(p_dupsock->rfds),
                             &(p_dupsock->wfds), NULL, &tv);
        if( ret < 0 ) {
            PJ_LOG(2, (__FILE__, "Error in select"));
        } else if (ret > 0) {
            /* Readable: receive a datagram and hand it to the callback.
             * NOTE(review): the recvfrom() status is not checked — on
             * failure ntemp may be stale when stored into in_packet.len. */
            if( PJ_FD_ISSET( *(p_dupsock->p_sock), &(p_dupsock->rfds)) ) {
                ntemp = sizeof(p_dupsock->in_buffer);
                sock_len = sizeof(p_dupsock->in_packet.addr);
                pj_sock_recvfrom(*(p_dupsock->p_sock),
                                 p_dupsock->in_packet.data, &ntemp, 0,
                                 (pj_sockaddr_t *)(&(p_dupsock->in_packet.addr)),
                                 &sock_len);
                p_dupsock->in_packet.len = ntemp;
                if(p_dupsock->recv_callback != NULL) {
                    p_dupsock->recv_callback(p_dupsock);
                }
                p_dupsock->in_packet.len = 0;
            }

            /* Writable: push the unsent tail of the pending packet. */
            if( PJ_FD_ISSET( *(p_dupsock->p_sock), &(p_dupsock->wfds)) ) {
                ntemp = p_dupsock->to_send->len - p_dupsock->to_send->sent;
                pj_sock_sendto(*(p_dupsock->p_sock),
                               p_dupsock->to_send->data + p_dupsock->to_send->sent,
                               &ntemp, 0,
                               (pj_sockaddr_t *)(&(p_dupsock->to_send->addr)),
                               sizeof(p_dupsock->to_send->addr));
                p_dupsock->to_send->sent += ntemp;
                if(p_dupsock->to_send->len == p_dupsock->to_send->sent) {
                    if(p_dupsock->send_callback != NULL) {
                        p_dupsock->send_callback(p_dupsock);
                    }
                    p_dupsock->to_send = NULL;
                    PJ_FD_CLR(*(p_dupsock->p_sock), &(p_dupsock->wfds));
                }
            }
        }
        //PJ_LOG(5, (__FILE__, "end of a loop"));
    }

    pj_event_destroy(p_dupsock->p_event);
    pj_sock_close(*(p_dupsock->p_sock));
    return 0;
}
/*
 * unregister_test()
 * Check if callback is still called after socket has been unregistered or
 * closed.
 *
 * Flow: register a receiver socket to a fresh ioqueue, start an async
 * recv, send one packet and verify the completion callback bumped
 * packet_cnt. Then start a second recv, send another packet, but
 * unregister the key BEFORE polling — after which no callback must
 * fire (packet_cnt must stay 0).
 *
 * Returns 0 on success, negative code identifying the failing step.
 */
static int unregister_test(pj_bool_t allow_concur)
{
    enum { RPORT = 50000, SPORT = 50001 };
    pj_pool_t *pool;
    pj_ioqueue_t *ioqueue;
    pj_sock_t ssock;
    pj_sock_t rsock;
    int addrlen;
    pj_sockaddr_in addr;
    pj_ioqueue_key_t *key;
    pj_ioqueue_op_key_t opkey;
    pj_ioqueue_callback cb;
    unsigned packet_cnt;
    char sendbuf[10], recvbuf[10];
    pj_ssize_t bytes;
    pj_time_val timeout;
    pj_status_t status;

    pool = pj_pool_create(mem, "test", 4000, 4000, NULL);
    if (!pool) {
        app_perror("Unable to create pool", PJ_ENOMEM);
        return -100;
    }

    status = pj_ioqueue_create(pool, 16, &ioqueue);
    if (status != PJ_SUCCESS) {
        app_perror("Error creating ioqueue", status);
        return -110;
    }

    // Set concurrency
    TRACE_("set concurrency...");
    status = pj_ioqueue_set_default_concurrency(ioqueue, allow_concur);
    if (status != PJ_SUCCESS) {
        return -112;
    }

    /* Create sender socket */
    status = app_socket(pj_AF_INET(), pj_SOCK_DGRAM(), 0, SPORT, &ssock);
    if (status != PJ_SUCCESS) {
        app_perror("Error initializing socket", status);
        return -120;
    }

    /* Create receiver socket. */
    status = app_socket(pj_AF_INET(), pj_SOCK_DGRAM(), 0, RPORT, &rsock);
    if (status != PJ_SUCCESS) {
        app_perror("Error initializing socket", status);
        return -130;
    }

    /* Register rsock to ioqueue.
     * packet_cnt's address is the user data: on_read_complete is expected
     * to increment it (established by the packet_cnt checks below). */
    pj_bzero(&cb, sizeof(cb));
    cb.on_read_complete = &on_read_complete;
    packet_cnt = 0;
    status = pj_ioqueue_register_sock(pool, ioqueue, rsock, &packet_cnt,
                                      &cb, &key);
    if (status != PJ_SUCCESS) {
        app_perror("Error registering to ioqueue", status);
        return -140;
    }

    /* Init operation key. */
    pj_ioqueue_op_key_init(&opkey, sizeof(opkey));

    /* Start reading. */
    bytes = sizeof(recvbuf);
    status = pj_ioqueue_recv( key, &opkey, recvbuf, &bytes, 0);
    if (status != PJ_EPENDING) {
        app_perror("Expecting PJ_EPENDING, but got this", status);
        return -150;
    }

    /* Init destination address.
     */
    addrlen = sizeof(addr);
    status = pj_sock_getsockname(rsock, &addr, &addrlen);
    if (status != PJ_SUCCESS) {
        app_perror("getsockname error", status);
        return -160;
    }

    /* Override address with 127.0.0.1, since getsockname will return
     * zero in the address field.
     */
    addr.sin_addr = pj_inet_addr2("127.0.0.1");

    /* Init buffer to send */
    pj_ansi_strcpy(sendbuf, "Hello0123");

    /* Send one packet. */
    bytes = sizeof(sendbuf);
    status = pj_sock_sendto(ssock, sendbuf, &bytes, 0,
                            &addr, sizeof(addr));
    if (status != PJ_SUCCESS) {
        app_perror("sendto error", status);
        return -170;
    }

    /* Check if packet is received. */
    timeout.sec = 1; timeout.msec = 0;
#ifdef PJ_SYMBIAN
    pj_symbianos_poll(-1, 1000);
#else
    pj_ioqueue_poll(ioqueue, &timeout);
#endif

    if (packet_cnt != 1) {
        return -180;
    }

    /* Just to make sure things are settled.. */
    pj_thread_sleep(100);

    /* Start reading again. */
    bytes = sizeof(recvbuf);
    status = pj_ioqueue_recv( key, &opkey, recvbuf, &bytes, 0);
    if (status != PJ_EPENDING) {
        app_perror("Expecting PJ_EPENDING, but got this", status);
        return -190;
    }

    /* Reset packet counter */
    packet_cnt = 0;

    /* Send one packet. */
    bytes = sizeof(sendbuf);
    status = pj_sock_sendto(ssock, sendbuf, &bytes, 0,
                            &addr, sizeof(addr));
    if (status != PJ_SUCCESS) {
        app_perror("sendto error", status);
        return -200;
    }

    /* Now unregister and close socket. */
    pj_ioqueue_unregister(key);

    /* Poll ioqueue. */
#ifdef PJ_SYMBIAN
    pj_symbianos_poll(-1, 1000);
#else
    timeout.sec = 1; timeout.msec = 0;
    pj_ioqueue_poll(ioqueue, &timeout);
#endif

    /* Must NOT receive any packets after socket is closed! */
    if (packet_cnt > 0) {
        PJ_LOG(3,(THIS_FILE, "....errror: not expecting to receive packet "
                             "after socket has been closed"));
        return -210;
    }

    /* Success */
    pj_sock_close(ssock);
    pj_ioqueue_destroy(ioqueue);

    pj_pool_release(pool);

    return 0;
}
/*
 * Minimal STUN test server loop: wait (select, 10 ms) for a datagram on
 * server_sock, decode it as a STUN request, build a 400 (Bad Request)
 * response, and send it back — subject to the test knobs in param:
 * server_drop_request drops the reply, server_wait_for_event delays it
 * until server_event is signalled. Runs until thread_quit_flag is set.
 */
static int server_thread_proc(void *p)
{
    struct stun_test_session *test_sess = (struct stun_test_session*)p;
    pj_pool_t *pool;
    pj_status_t status;

    PJ_LOG(4,(THIS_FILE, "Server thread running"));

    pool = pj_pool_create(test_sess->stun_cfg.pf, "server", 512, 512, NULL);

    while (!test_sess->thread_quit_flag) {
        pj_time_val timeout = {0, 10};
        pj_fd_set_t rdset;
        int n;

        /* Serve client */
        PJ_FD_ZERO(&rdset);
        PJ_FD_SET(test_sess->server_sock, &rdset);
        n = pj_sock_select(test_sess->server_sock+1, &rdset, NULL, NULL,
                           &timeout);
        if (n==1 && PJ_FD_ISSET(test_sess->server_sock, &rdset)) {
            pj_uint8_t pkt[512];
            pj_ssize_t pkt_len;
            pj_size_t res_len;
            pj_sockaddr client_addr;
            int addr_len;
            pj_stun_msg *stun_req, *stun_res;

            /* Per-request scratch memory; previous request's data dies. */
            pj_pool_reset(pool);

            /* Got query */
            pkt_len = sizeof(pkt);
            addr_len = sizeof(client_addr);
            status = pj_sock_recvfrom(test_sess->server_sock, pkt, &pkt_len,
                                      0, &client_addr, &addr_len);
            if (status != PJ_SUCCESS) {
                continue;
            }

            status = pj_stun_msg_decode(pool, pkt, pkt_len,
                                        PJ_STUN_IS_DATAGRAM,
                                        &stun_req, NULL, NULL);
            if (status != PJ_SUCCESS) {
                PJ_PERROR(1,(THIS_FILE, status, "STUN request decode error"));
                continue;
            }

            /* Always answer with 400 — the client-side tests only care
             * about receiving *a* response, not a success. */
            status = pj_stun_msg_create_response(pool, stun_req,
                                                 PJ_STUN_SC_BAD_REQUEST, NULL,
                                                 &stun_res);
            if (status != PJ_SUCCESS) {
                PJ_PERROR(1,(THIS_FILE, status, "STUN create response error"));
                continue;
            }

            status = pj_stun_msg_encode(stun_res, pkt, sizeof(pkt), 0,
                                        NULL, &res_len);
            if (status != PJ_SUCCESS) {
                PJ_PERROR(1,(THIS_FILE, status, "STUN encode error"));
                continue;
            }

            /* Ignore request */
            if (test_sess->param.server_drop_request)
                continue;

            /* Wait for signal to continue */
            if (test_sess->param.server_wait_for_event)
                pj_event_wait(test_sess->server_event);

            pkt_len = res_len;
            pj_sock_sendto(test_sess->server_sock, pkt, &pkt_len, 0,
                           &client_addr,
                           pj_sockaddr_get_len(&client_addr));
        }
    }

    pj_pool_release(pool);

    PJ_LOG(4,(THIS_FILE, "Server thread quitting"));
    return 0;
}
/* * Callback notification from STUN session when it receives STUN * indications. This callback was trigger by STUN incoming message * processing in pj_turn_allocation_on_rx_client_pkt(). */ static pj_status_t stun_on_rx_indication(pj_stun_session *sess, const pj_uint8_t *pkt, unsigned pkt_len, const pj_stun_msg *msg, void *token, const pj_sockaddr_t *src_addr, unsigned src_addr_len) { pj_stun_xor_peer_addr_attr *peer_attr; pj_stun_data_attr *data_attr; pj_turn_allocation *alloc; pj_turn_permission *perm; pj_ssize_t len; PJ_UNUSED_ARG(pkt); PJ_UNUSED_ARG(pkt_len); PJ_UNUSED_ARG(token); PJ_UNUSED_ARG(src_addr); PJ_UNUSED_ARG(src_addr_len); alloc = (pj_turn_allocation*) pj_stun_session_get_user_data(sess); /* Only expect Send Indication */ if (msg->hdr.type != PJ_STUN_SEND_INDICATION) { /* Ignore */ return PJ_SUCCESS; } /* Get XOR-PEER-ADDRESS attribute */ peer_attr = (pj_stun_xor_peer_addr_attr*) pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_XOR_PEER_ADDR, 0); /* MUST have XOR-PEER-ADDRESS attribute */ if (!peer_attr) return PJ_SUCCESS; /* Get DATA attribute */ data_attr = (pj_stun_data_attr*) pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_DATA, 0); /* Create/update/refresh the permission */ perm = lookup_permission_by_addr(alloc, &peer_attr->sockaddr, pj_sockaddr_get_len(&peer_attr->sockaddr)); if (perm == NULL) { perm = create_permission(alloc, &peer_attr->sockaddr, pj_sockaddr_get_len(&peer_attr->sockaddr)); } refresh_permission(perm); /* Return if we don't have data */ if (data_attr == NULL) return PJ_SUCCESS; /* Relay the data to peer */ len = data_attr->length; pj_sock_sendto(alloc->relay.tp.sock, data_attr->data, &len, 0, &peer_attr->sockaddr, pj_sockaddr_get_len(&peer_attr->sockaddr)); return PJ_SUCCESS; }
/* * pj_ioqueue_sendto() * * Start asynchronous write() to the descriptor. */ PJ_DEF(pj_status_t) pj_ioqueue_sendto( pj_ioqueue_key_t *key, pj_ioqueue_op_key_t *op_key, const void *data, pj_ssize_t *length, pj_uint32_t flags, const pj_sockaddr_t *addr, int addrlen) { struct write_operation *write_op; unsigned retry; pj_status_t status; pj_ssize_t sent; PJ_ASSERT_RETURN(key && op_key && data && length, PJ_EINVAL); PJ_CHECK_STACK(); /* Check if key is closing. */ if (IS_CLOSING(key)) return PJ_ECANCELLED; /* We can not use PJ_IOQUEUE_ALWAYS_ASYNC for socket write */ flags &= ~(PJ_IOQUEUE_ALWAYS_ASYNC); /* Fast track: * Try to send data immediately, only if there's no pending write! * Note: * We are speculating that the list is empty here without properly * acquiring ioqueue's mutex first. This is intentional, to maximize * performance via parallelism. * * This should be safe, because: * - by convention, we require caller to make sure that the * key is not unregistered while other threads are invoking * an operation on the same key. * - pj_list_empty() is safe to be invoked by multiple threads, * even when other threads are modifying the list. */ if (pj_list_empty(&key->write_list)) { /* * See if data can be sent immediately. */ sent = *length; status = pj_sock_sendto(key->fd, data, &sent, flags, addr, addrlen); if (status == PJ_SUCCESS) { /* Success! */ *length = sent; return PJ_SUCCESS; } else { /* If error is not EWOULDBLOCK (or EAGAIN on Linux), report * the error to caller. */ if (status != PJ_STATUS_FROM_OS(PJ_BLOCKING_ERROR_VAL)) { return status; } status = status; } } /* * Check that address storage can hold the address parameter. */ PJ_ASSERT_RETURN(addrlen <= (int)sizeof(pj_sockaddr_in), PJ_EBUG); /* * Schedule asynchronous send. 
*/ write_op = (struct write_operation*)op_key; /* Spin if write_op has pending operation */ for (retry=0; write_op->op != 0 && retry<PENDING_RETRY; ++retry) pj_thread_sleep(0); /* Last chance */ if (write_op->op) { /* Unable to send packet because there is already pending write on the * write_op. We could not put the operation into the write_op * because write_op already contains a pending operation! And * we could not send the packet directly with sendto() either, * because that will break the order of the packet. So we can * only return error here. * * This could happen for example in multithreads program, * where polling is done by one thread, while other threads are doing * the sending only. If the polling thread runs on lower priority * than the sending thread, then it's possible that the pending * write flag is not cleared in-time because clearing is only done * during polling. * * Aplication should specify multiple write operation keys on * situation like this. */ //pj_assert(!"ioqueue: there is pending operation on this key!"); return PJ_EBUSY; } write_op->op = PJ_IOQUEUE_OP_SEND_TO; write_op->buf = (char*)data; write_op->size = *length; write_op->written = 0; write_op->flags = flags; pj_memcpy(&write_op->rmt_addr, addr, addrlen); write_op->rmt_addrlen = addrlen; pj_mutex_lock(key->mutex); pj_list_insert_before(&key->write_list, write_op); ioqueue_add_to_set(key->ioqueue, key, WRITEABLE_EVENT); pj_mutex_unlock(key->mutex); return PJ_EPENDING; }
/*
 * ioqueue_dispatch_event()
 *
 * Report occurence of an event in the key to be processed by the
 * framework.
 *
 * Handles two cases while holding the key's mutex:
 *  - completion of a pending connect() (TCP builds): determine the
 *    connect result portably, then invoke on_connect_complete;
 *  - a writable socket with queued write operations: send the next
 *    chunk of the head write_op and invoke on_write_complete when the
 *    whole buffer is done (or immediately for datagram sockets / on
 *    error).
 * The mutex is released before the callback when concurrency is allowed,
 * held across it otherwise.
 */
void ioqueue_dispatch_write_event(pj_ioqueue_t *ioqueue, pj_ioqueue_key_t *h)
{
    /* Lock the key. */
    pj_mutex_lock(h->mutex);

    if (IS_CLOSING(h)) {
        pj_mutex_unlock(h->mutex);
        return;
    }

#if defined(PJ_HAS_TCP) && PJ_HAS_TCP!=0
    if (h->connecting) {
        /* Completion of connect() operation */
        pj_ssize_t bytes_transfered;
        pj_bool_t has_lock;

        /* Clear operation. */
        h->connecting = 0;

        ioqueue_remove_from_set(ioqueue, h, WRITEABLE_EVENT);
        ioqueue_remove_from_set(ioqueue, h, EXCEPTION_EVENT);


#if (defined(PJ_HAS_SO_ERROR) && PJ_HAS_SO_ERROR!=0)
        /* from connect(2):
         * On Linux, use getsockopt to read the SO_ERROR option at
         * level SOL_SOCKET to determine whether connect() completed
         * successfully (if SO_ERROR is zero).
         */
        {
            int value;
            int vallen = sizeof(value);
            int gs_rc = pj_sock_getsockopt(h->fd, SOL_SOCKET, SO_ERROR,
                                           &value, &vallen);
            if (gs_rc != 0) {
                /* Argh!! What to do now???
                 * Just indicate that the socket is connected. The
                 * application will get error as soon as it tries to use
                 * the socket to send/receive.
                 */
                bytes_transfered = 0;
            } else {
                /* Non-zero SO_ERROR is the connect() failure code,
                 * passed to the callback as bytes_transfered. */
                bytes_transfered = value;
            }
        }
#elif defined(PJ_WIN32) && PJ_WIN32!=0
        bytes_transfered = 0; /* success */
#else
        /* Excellent information in D.J. Bernstein page:
         * http://cr.yp.to/docs/connect.html
         *
         * Seems like the most portable way of detecting connect()
         * failure is to call getpeername(). If socket is connected,
         * getpeername() will return 0. If the socket is not connected,
         * it will return ENOTCONN, and read(fd, &ch, 1) will produce
         * the right errno through error slippage. This is a combination
         * of suggestions from Douglas C. Schmidt and Ken Keys.
         */
        {
            int gp_rc;
            struct sockaddr_in addr;
            socklen_t addrlen = sizeof(addr);

            gp_rc = getpeername(h->fd, (struct sockaddr*)&addr, &addrlen);
            bytes_transfered = (gp_rc < 0) ? gp_rc : -gp_rc;
        }
#endif

        /* Unlock; from this point we don't need to hold key's mutex
         * (unless concurrency is disabled, which in this case we should
         * hold the mutex while calling the callback) */
        if (h->allow_concurrent) {
            /* concurrency may be changed while we're in the callback, so
             * save it to a flag.
             */
            has_lock = PJ_FALSE;
            pj_mutex_unlock(h->mutex);
        } else {
            has_lock = PJ_TRUE;
        }

        /* Call callback. */
        if (h->cb.on_connect_complete && !IS_CLOSING(h))
            (*h->cb.on_connect_complete)(h, bytes_transfered);

        /* Unlock if we still hold the lock */
        if (has_lock) {
            pj_mutex_unlock(h->mutex);
        }

        /* Done. */

    } else
#endif /* PJ_HAS_TCP */
    if (key_has_pending_write(h)) {
        /* Socket is writable. */
        struct write_operation *write_op;
        pj_ssize_t sent;
        pj_status_t send_rc;

        /* Get the first in the queue. */
        write_op = h->write_list.next;

        /* For datagrams, we can remove the write_op from the list
         * so that send() can work in parallel.
         */
        if (h->fd_type == pj_SOCK_DGRAM()) {
            pj_list_erase(write_op);

            if (pj_list_empty(&h->write_list))
                ioqueue_remove_from_set(ioqueue, h, WRITEABLE_EVENT);

        }

        /* Send the data.
         * Unfortunately we must do this while holding key's mutex, thus
         * preventing parallel write on a single key.. :-((
         */
        sent = write_op->size - write_op->written;
        if (write_op->op == PJ_IOQUEUE_OP_SEND) {
            send_rc = pj_sock_send(h->fd, write_op->buf+write_op->written,
                                   &sent, write_op->flags);
            /* Can't do this. We only clear "op" after we're finished sending
             * the whole buffer.
             */
            //write_op->op = 0;
        } else if (write_op->op == PJ_IOQUEUE_OP_SEND_TO) {
            send_rc = pj_sock_sendto(h->fd,
                                     write_op->buf+write_op->written,
                                     &sent, write_op->flags,
                                     &write_op->rmt_addr,
                                     write_op->rmt_addrlen);
            /* Can't do this. We only clear "op" after we're finished sending
             * the whole buffer.
             */
            //write_op->op = 0;
        } else {
            pj_assert(!"Invalid operation type!");
            write_op->op = PJ_IOQUEUE_OP_NONE;
            send_rc = PJ_EBUG;
        }

        if (send_rc == PJ_SUCCESS) {
            write_op->written += sent;
        } else {
            /* On failure, store the negated status in "written" so the
             * callback can detect the error from its size argument. */
            pj_assert(send_rc > 0);
            write_op->written = -send_rc;
        }

        /* Are we finished with this buffer? */
        if (send_rc!=PJ_SUCCESS ||
            write_op->written == (pj_ssize_t)write_op->size ||
            h->fd_type == pj_SOCK_DGRAM())
        {
            pj_bool_t has_lock;

            write_op->op = PJ_IOQUEUE_OP_NONE;

            if (h->fd_type != pj_SOCK_DGRAM()) {
                /* Write completion of the whole stream. */
                pj_list_erase(write_op);

                /* Clear operation if there's no more data to send. */
                if (pj_list_empty(&h->write_list))
                    ioqueue_remove_from_set(ioqueue, h, WRITEABLE_EVENT);

            }

            /* Unlock; from this point we don't need to hold key's mutex
             * (unless concurrency is disabled, which in this case we should
             * hold the mutex while calling the callback) */
            if (h->allow_concurrent) {
                /* concurrency may be changed while we're in the callback, so
                 * save it to a flag.
                 */
                has_lock = PJ_FALSE;
                pj_mutex_unlock(h->mutex);
            } else {
                has_lock = PJ_TRUE;
            }

            /* Call callback. */
            if (h->cb.on_write_complete && !IS_CLOSING(h)) {
                (*h->cb.on_write_complete)(h,
                                           (pj_ioqueue_op_key_t*)write_op,
                                           write_op->written);
            }

            if (has_lock) {
                pj_mutex_unlock(h->mutex);
            }

        } else {
            pj_mutex_unlock(h->mutex);
        }

        /* Done. */
    } else {
        /*
         * This is normal; execution may fall here when multiple threads
         * are signalled for the same event, but only one thread eventually
         * able to process the event.
         */
        pj_mutex_unlock(h->mutex);
    }
}
/*
 * Handle incoming packet from client. This would have been called by
 * server upon receiving packet from a listener.
 *
 * The first byte's top two bits select the path: 00 means a STUN
 * message (handed to the STUN session), anything else is treated as a
 * ChannelData message and relayed to the peer bound to that channel.
 * The allocation lock is held for the whole function; all exits go
 * through on_return so it is released exactly once.
 */
PJ_DEF(void) pj_turn_allocation_on_rx_client_pkt(pj_turn_allocation *alloc,
                                                 pj_turn_pkt *pkt)
{
    pj_bool_t is_stun;
    pj_status_t status;

    /* Lock this allocation */
    pj_lock_acquire(alloc->lock);

    /* Quickly check if this is STUN message */
    is_stun = ((*((pj_uint8_t*)pkt->pkt) & 0xC0) == 0);

    if (is_stun) {
        /*
         * This could be an incoming STUN requests or indications.
         * Pass this through to the STUN session, which will call
         * our stun_on_rx_request() or stun_on_rx_indication()
         * callbacks.
         *
         * Note: currently it is necessary to specify the
         * PJ_STUN_NO_FINGERPRINT_CHECK otherwise the FINGERPRINT
         * attribute inside STUN Send Indication message will mess up
         * with fingerprint checking.
         */
        unsigned options = PJ_STUN_CHECK_PACKET | PJ_STUN_NO_FINGERPRINT_CHECK;
        pj_size_t parsed_len = 0;

        if (pkt->transport->listener->tp_type == PJ_TURN_TP_UDP)
            options |= PJ_STUN_IS_DATAGRAM;

        status = pj_stun_session_on_rx_pkt(alloc->sess, pkt->pkt, pkt->len,
                                           options, NULL, &parsed_len,
                                           &pkt->src.clt_addr,
                                           pkt->src_addr_len);

        /* Consume the parsed bytes from the packet buffer.  UDP is
         * one-message-per-datagram, so the whole buffer is consumed;
         * stream transports may leave a partial trailing message. */
        if (pkt->transport->listener->tp_type == PJ_TURN_TP_UDP) {
            pkt->len = 0;
        } else if (parsed_len > 0) {
            if (parsed_len == pkt->len) {
                pkt->len = 0;
            } else {
                /* Shift the unparsed remainder to the buffer start. */
                pj_memmove(pkt->pkt, pkt->pkt+parsed_len,
                           pkt->len - parsed_len);
                pkt->len -= parsed_len;
            }
        }

        if (status != PJ_SUCCESS) {
            alloc_err(alloc, "Error handling STUN packet", status);
            goto on_return;
        }

    } else {
        /*
         * This is not a STUN packet, must be ChannelData packet.
         */
        pj_turn_channel_data *cd = (pj_turn_channel_data*)pkt->pkt;
        pj_turn_permission *perm;
        pj_ssize_t len;

        /* ChannelData header must be exactly 4 bytes on the wire. */
        pj_assert(sizeof(*cd)==4);

        /* For UDP check the packet length */
        /* NOTE(review): cd->length is read before pkt->len is verified
         * to be at least sizeof(*cd) — confirm callers guarantee a
         * minimum packet size. */
        if (alloc->transport->listener->tp_type == PJ_TURN_TP_UDP) {
            if (pkt->len < pj_ntohs(cd->length)+sizeof(*cd)) {
                PJ_LOG(4,(alloc->obj_name,
                          "ChannelData from %s discarded: UDP size error",
                          alloc->info));
                goto on_return;
            }
        } else {
            pj_assert(!"Unsupported transport");
            goto on_return;
        }

        perm = lookup_permission_by_chnum(alloc, pj_ntohs(cd->ch_number));
        if (!perm) {
            /* Discard */
            PJ_LOG(4,(alloc->obj_name,
                      "ChannelData from %s discarded: ch#0x%x not found",
                      alloc->info, pj_ntohs(cd->ch_number)));
            goto on_return;
        }

        /* Relay the data */
        /* Payload starts right after the 4-byte header (cd+1); send
         * result is intentionally ignored (best-effort relay). */
        len = pj_ntohs(cd->length);
        pj_sock_sendto(alloc->relay.tp.sock, cd+1, &len, 0,
                       &perm->hkey.peer_addr,
                       pj_sockaddr_get_len(&perm->hkey.peer_addr));

        /* Refresh permission */
        refresh_permission(perm);
    }

on_return:
    /* Release lock */
    pj_lock_release(alloc->lock);
}
/*
 * Resolve the server-reflexive (mapped) address of each socket in
 * sock[] by sending classic STUN Binding Requests to one or two STUN
 * servers and collecting the MAPPED-ADDRESS / XOR-MAPPED-ADDRESS from
 * the responses.  Socket and server indices are embedded in tsx[2] and
 * tsx[3] of each request so responses can be matched back.
 * (NOTE: the function body continues beyond this span — result
 * extraction into mapped_addr[] and cleanup follow the retransmission
 * loop.)
 */
PJ_DEF(pj_status_t) pjstun_get_mapped_addr( pj_pool_factory *pf,
                                            int sock_cnt, pj_sock_t sock[],
                                            const pj_str_t *srv1, int port1,
                                            const pj_str_t *srv2, int port2,
                                            pj_sockaddr_in mapped_addr[])
{
    unsigned srv_cnt;
    pj_sockaddr_in srv_addr[2];
    int i, j, send_cnt = 0, nfds;
    pj_pool_t *pool;
    /* Per-socket record of the mapped address reported by each server;
     * mapped_port != 0 doubles as the "response received" flag. */
    struct query_rec {
        struct {
            pj_uint32_t	mapped_addr;
            pj_uint32_t	mapped_port;
        } srv[2];
    } *rec;
    void       *out_msg;
    pj_size_t	out_msg_len;
    int wait_resp = 0;
    pj_status_t status;

    PJ_CHECK_STACK();

    TRACE_((THIS_FILE, "Entering pjstun_get_mapped_addr()"));

    /* Create pool. */
    pool = pj_pool_create(pf, "stun%p", 400, 400, NULL);
    if (!pool) return PJ_ENOMEM;

    /* Allocate client records */
    rec = (struct query_rec*) pj_pool_calloc(pool, sock_cnt, sizeof(*rec));
    if (!rec) {
        status = PJ_ENOMEM;
        goto on_error;
    }

    TRACE_((THIS_FILE, " Memory allocated."));

    /* Create the outgoing BIND REQUEST message template */
    status = pjstun_create_bind_req( pool, &out_msg, &out_msg_len,
                                     pj_rand(), pj_rand());
    if (status != PJ_SUCCESS)
        goto on_error;

    TRACE_((THIS_FILE, " Binding request created."));

    /* Resolve servers. */
    status = pj_sockaddr_in_init(&srv_addr[0], srv1, (pj_uint16_t)port1);
    if (status != PJ_SUCCESS)
        goto on_error;

    srv_cnt = 1;

    if (srv2 && port2) {
        status = pj_sockaddr_in_init(&srv_addr[1], srv2, (pj_uint16_t)port2);
        if (status != PJ_SUCCESS)
            goto on_error;

        /* Only use the second server when it differs from the first. */
        if (srv_addr[1].sin_addr.s_addr != srv_addr[0].sin_addr.s_addr &&
            srv_addr[1].sin_port != srv_addr[0].sin_port)
        {
            srv_cnt++;
        }
    }

    TRACE_((THIS_FILE, " Server initialized, using %d server(s)", srv_cnt));

    /* Init mapped addresses to zero */
    pj_memset(mapped_addr, 0, sock_cnt * sizeof(pj_sockaddr_in));

    /* We need these many responses */
    wait_resp = sock_cnt * srv_cnt;

    TRACE_((THIS_FILE, " Done initialization."));

#if defined(PJ_SELECT_NEEDS_NFDS) && PJ_SELECT_NEEDS_NFDS!=0
    /* Compute the highest descriptor for select() on platforms that
     * require an accurate nfds value. */
    nfds = -1;
    for (i=0; i<sock_cnt; ++i) {
        if (sock[i] > nfds) {
            nfds = sock[i];
        }
    }
#else
    nfds = FD_SETSIZE-1;
#endif

    /* Main retransmission loop. */
    for (send_cnt=0; send_cnt<MAX_REQUEST; ++send_cnt) {
        pj_time_val next_tx, now;
        pj_fd_set_t r;
        int select_rc;

        PJ_FD_ZERO(&r);

        /* Send messages to servers that has not given us response. */
        for (i=0; i<sock_cnt && status==PJ_SUCCESS; ++i) {
            for (j=0; j<srv_cnt && status==PJ_SUCCESS; ++j) {
                pjstun_msg_hdr *msg_hdr = (pjstun_msg_hdr*) out_msg;
                pj_ssize_t sent_len;

                if (rec[i].srv[j].mapped_port != 0)
                    continue;

                /* Modify message so that we can distinguish response. */
                msg_hdr->tsx[2] = pj_htonl(i);
                msg_hdr->tsx[3] = pj_htonl(j);

                /* Send! */
                sent_len = out_msg_len;
                status = pj_sock_sendto(sock[i], out_msg, &sent_len, 0,
                                        (pj_sockaddr_t*)&srv_addr[j],
                                        sizeof(pj_sockaddr_in));
            }
        }

        /* All requests sent.
         * The loop below will wait for responses until all responses have
         * been received (i.e. wait_resp==0) or timeout occurs, which then
         * we'll go to the next retransmission iteration.
         */
        TRACE_((THIS_FILE, " Request(s) sent, counter=%d", send_cnt));

        /* Calculate time of next retransmission. */
        pj_gettimeofday(&next_tx);
        next_tx.sec += (stun_timer[send_cnt]/1000);
        next_tx.msec += (stun_timer[send_cnt]%1000);
        pj_time_val_normalize(&next_tx);

        for (pj_gettimeofday(&now), select_rc=1;
             status==PJ_SUCCESS && select_rc>=1 && wait_resp>0 &&
             PJ_TIME_VAL_LT(now, next_tx);
             pj_gettimeofday(&now))
        {
            pj_time_val timeout;

            timeout = next_tx;
            PJ_TIME_VAL_SUB(timeout, now);

            for (i=0; i<sock_cnt; ++i) {
                PJ_FD_SET(sock[i], &r);
            }

            select_rc = pj_sock_select(nfds+1, &r, NULL, NULL, &timeout);
            TRACE_((THIS_FILE, " select() rc=%d", select_rc));

            if (select_rc < 1)
                continue;

            /* Drain every readable socket. */
            for (i=0; i<sock_cnt; ++i) {
                int sock_idx, srv_idx;
                pj_ssize_t len;
                pjstun_msg msg;
                pj_sockaddr_in addr;
                int addrlen = sizeof(addr);
                pjstun_mapped_addr_attr *attr;
                char recv_buf[128];

                if (!PJ_FD_ISSET(sock[i], &r))
                    continue;

                len = sizeof(recv_buf);
                status = pj_sock_recvfrom( sock[i], recv_buf, &len, 0,
                                           (pj_sockaddr_t*)&addr,
                                           &addrlen);

                if (status != PJ_SUCCESS) {
                    char errmsg[PJ_ERR_MSG_SIZE];

                    PJ_LOG(4,(THIS_FILE, "recvfrom() error ignored: %s",
                              pj_strerror(status, errmsg,sizeof(errmsg)).ptr));

                    /* Ignore non-PJ_SUCCESS status.
                     * It possible that other SIP entity is currently
                     * sending SIP request to us, and because SIP message
                     * is larger than STUN, we could get EMSGSIZE when
                     * we call recvfrom().
                     */
                    status = PJ_SUCCESS;
                    continue;
                }

                status = pjstun_parse_msg(recv_buf, len, &msg);
                if (status != PJ_SUCCESS) {
                    char errmsg[PJ_ERR_MSG_SIZE];

                    PJ_LOG(4,(THIS_FILE, "STUN parsing error ignored: %s",
                              pj_strerror(status, errmsg,
                                          sizeof(errmsg)).ptr));

                    /* Also ignore non-successful parsing. This may not
                     * be STUN response at all. See the comment above.
                     */
                    status = PJ_SUCCESS;
                    continue;
                }

                /* Recover the (socket, server) pair encoded in the
                 * transaction ID when the request was sent. */
                sock_idx = pj_ntohl(msg.hdr->tsx[2]);
                srv_idx = pj_ntohl(msg.hdr->tsx[3]);

                if (sock_idx<0 || sock_idx>=sock_cnt || sock_idx!=i ||
                    srv_idx<0 || srv_idx>=2)
                {
                    status = PJLIB_UTIL_ESTUNININDEX;
                    continue;
                }

                if (pj_ntohs(msg.hdr->type) != PJSTUN_BINDING_RESPONSE) {
                    status = PJLIB_UTIL_ESTUNNOBINDRES;
                    continue;
                }

                if (rec[sock_idx].srv[srv_idx].mapped_port != 0) {
                    /* Already got response */
                    continue;
                }

                /* From this part, we consider the packet as a valid STUN
                 * response for our request.
                 */
                --wait_resp;

                if (pjstun_msg_find_attr(&msg, PJSTUN_ATTR_ERROR_CODE) != NULL) {
                    status = PJLIB_UTIL_ESTUNRECVERRATTR;
                    continue;
                }

                /* Prefer MAPPED-ADDRESS; fall back to XOR-MAPPED-ADDRESS
                 * (IPv4 only: family must be 1). */
                attr = (pjstun_mapped_addr_attr*)
                       pjstun_msg_find_attr(&msg, PJSTUN_ATTR_MAPPED_ADDR);
                if (!attr) {
                    attr = (pjstun_mapped_addr_attr*)
                           pjstun_msg_find_attr(&msg, PJSTUN_ATTR_XOR_MAPPED_ADDR);
                    if (!attr || attr->family != 1) {
                        status = PJLIB_UTIL_ESTUNNOMAP;
                        continue;
                    }
                }

                rec[sock_idx].srv[srv_idx].mapped_addr = attr->addr;
                rec[sock_idx].srv[srv_idx].mapped_port = attr->port;
                /* Undo the XOR obfuscation of XOR-MAPPED-ADDRESS. */
                if (pj_ntohs(attr->hdr.type) == PJSTUN_ATTR_XOR_MAPPED_ADDR) {
                    rec[sock_idx].srv[srv_idx].mapped_addr ^= pj_htonl(STUN_MAGIC);
                    rec[sock_idx].srv[srv_idx].mapped_port ^=
                        pj_htons(STUN_MAGIC >> 16);
                }
            }
        }

        /* The best scenario is if all requests have been replied.
         * Then we don't need to go to the next retransmission iteration.
         */
        if (wait_resp <= 0)
            break;
    }