static int client_thread(void *unused)
{
    PJ_UNUSED_ARG(unused);

    while (!client->quit) {
        pj_fd_set_t readset;
        pj_time_val delay = {0, 10};

        /* Also poll the timer heap */
        pj_timer_heap_poll(stun_cfg.timer_heap, NULL);

        /* Poll client socket */
        PJ_FD_ZERO(&readset);
        PJ_FD_SET(client->sock, &readset);
        if (pj_sock_select((int)client->sock+1, &readset, NULL, NULL, &delay)==1
            && PJ_FD_ISSET(client->sock, &readset))
        {
            char pkt[1000];
            pj_ssize_t len;
            pj_status_t status;
            pj_sockaddr src_addr;
            int src_addr_len;

            len = sizeof(pkt);
            src_addr_len = sizeof(src_addr);

            status = pj_sock_recvfrom(client->sock, pkt, &len, 0,
                                      &src_addr, &src_addr_len);
            if (status != PJ_SUCCESS)
                continue;

            /* Increment client's receive count */
            client->recv_count++;

            /* Only pass the packet to the client session if it is allowed
             * to respond.
             */
            if (!client->responding)
                continue;

            pj_stun_session_on_rx_pkt(client->sess, pkt, len,
                                      PJ_STUN_CHECK_PACKET | PJ_STUN_IS_DATAGRAM,
                                      NULL, NULL, &src_addr, src_addr_len);
        }
    }

    return 0;
}
static int server_thread(void *unused)
{
    PJ_UNUSED_ARG(unused);

    PJ_LOG(5,("", "    server thread started"));

    while (!server->quit) {
        pj_fd_set_t readset;
        pj_time_val delay = {0, 10};

        PJ_FD_ZERO(&readset);
        PJ_FD_SET(server->sock, &readset);
        if (pj_sock_select((int)server->sock+1, &readset, NULL, NULL, &delay)==1
            && PJ_FD_ISSET(server->sock, &readset))
        {
            char pkt[1000];
            pj_ssize_t len;
            pj_status_t status;
            pj_sockaddr src_addr;
            int src_addr_len;

            len = sizeof(pkt);
            src_addr_len = sizeof(src_addr);

            status = pj_sock_recvfrom(server->sock, pkt, &len, 0,
                                      &src_addr, &src_addr_len);
            if (status != PJ_SUCCESS)
                continue;

            /* Increment server's receive count */
            server->recv_count++;

            /* Only pass the packet to the server session if it is allowed
             * to respond.
             */
            if (!server->responding)
                continue;

            pj_stun_session_on_rx_pkt(server->sess, pkt, len,
                                      PJ_STUN_CHECK_PACKET | PJ_STUN_IS_DATAGRAM,
                                      NULL, NULL, &src_addr, src_addr_len);
        }
    }

    return 0;
}
/*
int response_client_send(response_client_t *uclient, response_request_t *request)
{
    int ret;
    long nbytes;
    char buff[UCLIENT_BUFSIZE];

    response_build_request(buff, sizeof(buff), request);
    nbytes = strlen(buff);

    ret = pj_sock_sendto(uclient->fd, buff, &nbytes, 0,
                         (const pj_sockaddr_t *)uclient->connect_data,
                         sizeof(pj_sockaddr_in));
    if (ret != 0) {
        PERROR_IF_TRUE(1, "Error in sending data\n");
        return -1;
    }

    return nbytes;
}

int response_client_send_ex(response_client_t *uclient, response_request_t *request)
{
    int ret;
    long nbytes;
    char buff[UCLIENT_BUFSIZE];
    char cipher[UCLIENT_BUFSIZE];
    char message[UCLIENT_BUFSIZE];
    uint32_t timestamp;
    char sts[32];
    char *passphrase = uclient->passphrase_f();
    char otp[100];
    char *id = uclient->id_f();
    char challenge[32];
    int len = 32;
    int len1 = 32;
    lvc_t lvc;

    timestamp = get_ts();
    len1 = ts2str(timestamp, sts);
    generate_otp(otp, passphrase, sts);
    do_encrypt(challenge, &len, sts, len1, otp);

    lvc_init(&lvc, message, UCLIENT_BUFSIZE);

    fprintf(stdout, "lvc_pack id:%s\n", id);
    lvc_pack(&lvc, strlen(id), id);

    fprintf(stdout, "lvc_pack ts:%u\n", timestamp);
    lvc_pack(&lvc, sizeof(uint32_t), (char *)&timestamp);

    fprintf(stdout, "lvc_pack challenge:%d\n", len);
    lvc_pack(&lvc, len, challenge);

    response_build_request(buff, sizeof(buff), request);
    nbytes = strlen(buff);
    fprintf(stdout, "Message to send:%.*s\n", nbytes, buff);

    len = sizeof(cipher);
    do_encrypt(cipher, &len, buff, nbytes, otp);
    lvc_pack(&lvc, len, cipher);
    lvc_pack_finish(&lvc);

    nbytes = lvc.len;
    ret = pj_sock_sendto(uclient->fd, lvc.data, &nbytes, 0,
                         (const pj_sockaddr_t *)uclient->connect_data,
                         sizeof(pj_sockaddr_in));
    //ret = pj_sock_sendto(uclient->fd, buff, &nbytes, 0,
    //                     (const pj_sockaddr_t *)uclient->connect_data,
    //                     sizeof(pj_sockaddr_in));
    if (ret != 0) {
        PERROR_IF_TRUE(1, "Error in sending data\n");
        return -1;
    }

    return nbytes;
}
*/

void response_client_recv(response_client_t *uclient)
{
    char buff[UCLIENT_BUFSIZE];
    long nbytes = UCLIENT_BUFSIZE;
    response_request_t req;
    int len = sizeof(pj_sockaddr_in);

    /* Note: the return status of pj_sock_recvfrom() is not checked here. */
    pj_sock_recvfrom(uclient->fd, (void *)buff, &nbytes, 0,
                     (pj_sockaddr_t *)uclient->connect_data, &len);
    buff[nbytes] = '\0';
    printf("buf: %s\n", buff);

    response_parse_request(buff, sizeof(buff), &req);

#if 1
    if (uclient->on_response_f != NULL) {
        uclient->on_response_f(uclient, &req);
    }
#endif
}
static int worker_thread(void *arg)
{
    pj_sock_t sock = (pj_sock_t)arg;
    char buf[512];
    pj_status_t last_recv_err = PJ_SUCCESS, last_write_err = PJ_SUCCESS;

    while (!thread_quit_flag) {
        pj_ssize_t len;
        pj_status_t rc;
        pj_sockaddr_in addr;
        int addrlen;

        len = sizeof(buf);
        addrlen = sizeof(addr);
        rc = pj_sock_recvfrom(sock, buf, &len, 0, &addr, &addrlen);
        if (rc != 0) {
            if (rc != last_recv_err) {
                app_perror("...recv error", rc);
                last_recv_err = rc;
            }
            continue;
        }

        pj_atomic_add(total_bytes, (pj_atomic_value_t)len);

        rc = pj_sock_sendto(sock, buf, &len, 0, &addr, addrlen);
        if (rc != PJ_SUCCESS) {
            if (rc != last_write_err) {
                app_perror("...send error", rc);
                last_write_err = rc;
            }
            continue;
        }
    }

    return 0;
}
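/* A minimal, hedged sketch (not part of the excerpt above) of how such an
 * echo worker thread could be started with PJLIB. The pool, thread name and
 * the way the socket is passed as the thread argument are illustrative
 * assumptions; the real test driver may do this differently.
 */
static pj_status_t start_echo_worker(pj_pool_t *pool, pj_sock_t sock,
                                     pj_thread_t **p_thread)
{
    /* Pass the socket as the thread argument, matching the
     * (pj_sock_t)arg cast in worker_thread() above.
     */
    return pj_thread_create(pool, "echo_worker", &worker_thread,
                            (void*)sock, PJ_THREAD_DEFAULT_STACK_SIZE,
                            0, p_thread);
}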
static int server_thread_proc(void *p)
{
    struct stun_test_session *test_sess = (struct stun_test_session*)p;
    pj_pool_t *pool;
    pj_status_t status;

    PJ_LOG(4,(THIS_FILE, "Server thread running"));

    pool = pj_pool_create(test_sess->stun_cfg.pf, "server", 512, 512, NULL);

    while (!test_sess->thread_quit_flag) {
        pj_time_val timeout = {0, 10};
        pj_fd_set_t rdset;
        int n;

        /* Serve client */
        PJ_FD_ZERO(&rdset);
        PJ_FD_SET(test_sess->server_sock, &rdset);
        n = pj_sock_select(test_sess->server_sock+1, &rdset, NULL, NULL,
                           &timeout);
        if (n==1 && PJ_FD_ISSET(test_sess->server_sock, &rdset)) {
            pj_uint8_t pkt[512];
            pj_ssize_t pkt_len;
            pj_size_t res_len;
            pj_sockaddr client_addr;
            int addr_len;
            pj_stun_msg *stun_req, *stun_res;

            pj_pool_reset(pool);

            /* Got query */
            pkt_len = sizeof(pkt);
            addr_len = sizeof(client_addr);
            status = pj_sock_recvfrom(test_sess->server_sock, pkt, &pkt_len,
                                      0, &client_addr, &addr_len);
            if (status != PJ_SUCCESS) {
                continue;
            }

            status = pj_stun_msg_decode(pool, pkt, pkt_len,
                                        PJ_STUN_IS_DATAGRAM,
                                        &stun_req, NULL, NULL);
            if (status != PJ_SUCCESS) {
                PJ_PERROR(1,(THIS_FILE, status, "STUN request decode error"));
                continue;
            }

            status = pj_stun_msg_create_response(pool, stun_req,
                                                 PJ_STUN_SC_BAD_REQUEST, NULL,
                                                 &stun_res);
            if (status != PJ_SUCCESS) {
                PJ_PERROR(1,(THIS_FILE, status, "STUN create response error"));
                continue;
            }

            status = pj_stun_msg_encode(stun_res, pkt, sizeof(pkt), 0, NULL,
                                        &res_len);
            if (status != PJ_SUCCESS) {
                PJ_PERROR(1,(THIS_FILE, status, "STUN encode error"));
                continue;
            }

            /* Ignore request */
            if (test_sess->param.server_drop_request)
                continue;

            /* Wait for signal to continue */
            if (test_sess->param.server_wait_for_event)
                pj_event_wait(test_sess->server_event);

            pkt_len = res_len;
            pj_sock_sendto(test_sess->server_sock, pkt, &pkt_len, 0,
                           &client_addr,
                           pj_sockaddr_get_len(&client_addr));
        }
    }

    pj_pool_release(pool);

    PJ_LOG(4,(THIS_FILE, "Server thread quitting"));
    return 0;
}
PJ_DEF(pj_status_t) pjstun_get_mapped_addr( pj_pool_factory *pf,
                                            int sock_cnt, pj_sock_t sock[],
                                            const pj_str_t *srv1, int port1,
                                            const pj_str_t *srv2, int port2,
                                            pj_sockaddr_in mapped_addr[])
{
    unsigned srv_cnt;
    pj_sockaddr_in srv_addr[2];
    int i, j, send_cnt = 0, nfds;
    pj_pool_t *pool;
    struct query_rec {
        struct {
            pj_uint32_t mapped_addr;
            pj_uint32_t mapped_port;
        } srv[2];
    } *rec;
    void *out_msg;
    pj_size_t out_msg_len;
    int wait_resp = 0;
    pj_status_t status;

    PJ_CHECK_STACK();

    TRACE_((THIS_FILE, "Entering pjstun_get_mapped_addr()"));

    /* Create pool. */
    pool = pj_pool_create(pf, "stun%p", 400, 400, NULL);
    if (!pool)
        return PJ_ENOMEM;

    /* Allocate client records */
    rec = (struct query_rec*) pj_pool_calloc(pool, sock_cnt, sizeof(*rec));
    if (!rec) {
        status = PJ_ENOMEM;
        goto on_error;
    }

    TRACE_((THIS_FILE, "  Memory allocated."));

    /* Create the outgoing BIND REQUEST message template */
    status = pjstun_create_bind_req( pool, &out_msg, &out_msg_len,
                                     pj_rand(), pj_rand());
    if (status != PJ_SUCCESS)
        goto on_error;

    TRACE_((THIS_FILE, "  Binding request created."));

    /* Resolve servers. */
    status = pj_sockaddr_in_init(&srv_addr[0], srv1, (pj_uint16_t)port1);
    if (status != PJ_SUCCESS)
        goto on_error;

    srv_cnt = 1;

    if (srv2 && port2) {
        status = pj_sockaddr_in_init(&srv_addr[1], srv2, (pj_uint16_t)port2);
        if (status != PJ_SUCCESS)
            goto on_error;

        if (srv_addr[1].sin_addr.s_addr != srv_addr[0].sin_addr.s_addr &&
            srv_addr[1].sin_port != srv_addr[0].sin_port)
        {
            srv_cnt++;
        }
    }

    TRACE_((THIS_FILE, "  Server initialized, using %d server(s)", srv_cnt));

    /* Init mapped addresses to zero */
    pj_memset(mapped_addr, 0, sock_cnt * sizeof(pj_sockaddr_in));

    /* We need these many responses */
    wait_resp = sock_cnt * srv_cnt;

    TRACE_((THIS_FILE, "  Done initialization."));

#if defined(PJ_SELECT_NEEDS_NFDS) && PJ_SELECT_NEEDS_NFDS!=0
    nfds = -1;
    for (i=0; i<sock_cnt; ++i) {
        if (sock[i] > nfds) {
            nfds = sock[i];
        }
    }
#else
    nfds = FD_SETSIZE-1;
#endif

    /* Main retransmission loop. */
    for (send_cnt=0; send_cnt<MAX_REQUEST; ++send_cnt) {
        pj_time_val next_tx, now;
        pj_fd_set_t r;
        int select_rc;

        PJ_FD_ZERO(&r);

        /* Send messages to servers that have not given us a response. */
        for (i=0; i<sock_cnt && status==PJ_SUCCESS; ++i) {
            for (j=0; j<srv_cnt && status==PJ_SUCCESS; ++j) {
                pjstun_msg_hdr *msg_hdr = (pjstun_msg_hdr*) out_msg;
                pj_ssize_t sent_len;

                if (rec[i].srv[j].mapped_port != 0)
                    continue;

                /* Modify message so that we can distinguish response. */
                msg_hdr->tsx[2] = pj_htonl(i);
                msg_hdr->tsx[3] = pj_htonl(j);

                /* Send! */
                sent_len = out_msg_len;
                status = pj_sock_sendto(sock[i], out_msg, &sent_len, 0,
                                        (pj_sockaddr_t*)&srv_addr[j],
                                        sizeof(pj_sockaddr_in));
            }
        }

        /* All requests sent.
         * The loop below will wait for responses until all responses have
         * been received (i.e. wait_resp==0) or timeout occurs, which then
         * we'll go to the next retransmission iteration.
         */
        TRACE_((THIS_FILE, "  Request(s) sent, counter=%d", send_cnt));

        /* Calculate time of next retransmission. */
        pj_gettimeofday(&next_tx);
        next_tx.sec += (stun_timer[send_cnt]/1000);
        next_tx.msec += (stun_timer[send_cnt]%1000);
        pj_time_val_normalize(&next_tx);

        for (pj_gettimeofday(&now), select_rc=1;
             status==PJ_SUCCESS && select_rc>=1 && wait_resp>0 &&
             PJ_TIME_VAL_LT(now, next_tx);
             pj_gettimeofday(&now))
        {
            pj_time_val timeout;

            timeout = next_tx;
            PJ_TIME_VAL_SUB(timeout, now);

            for (i=0; i<sock_cnt; ++i) {
                PJ_FD_SET(sock[i], &r);
            }

            select_rc = pj_sock_select(nfds+1, &r, NULL, NULL, &timeout);
            TRACE_((THIS_FILE, "  select() rc=%d", select_rc));

            if (select_rc < 1)
                continue;

            for (i=0; i<sock_cnt; ++i) {
                int sock_idx, srv_idx;
                pj_ssize_t len;
                pjstun_msg msg;
                pj_sockaddr_in addr;
                int addrlen = sizeof(addr);
                pjstun_mapped_addr_attr *attr;
                char recv_buf[128];

                if (!PJ_FD_ISSET(sock[i], &r))
                    continue;

                len = sizeof(recv_buf);
                status = pj_sock_recvfrom( sock[i], recv_buf, &len, 0,
                                           (pj_sockaddr_t*)&addr, &addrlen);

                if (status != PJ_SUCCESS) {
                    char errmsg[PJ_ERR_MSG_SIZE];

                    PJ_LOG(4,(THIS_FILE, "recvfrom() error ignored: %s",
                              pj_strerror(status, errmsg, sizeof(errmsg)).ptr));

                    /* Ignore non-PJ_SUCCESS status.
                     * It is possible that another SIP entity is currently
                     * sending a SIP request to us, and because SIP messages
                     * are larger than STUN packets, we could get EMSGSIZE
                     * when we call recvfrom().
                     */
                    status = PJ_SUCCESS;
                    continue;
                }

                status = pjstun_parse_msg(recv_buf, len, &msg);
                if (status != PJ_SUCCESS) {
                    char errmsg[PJ_ERR_MSG_SIZE];

                    PJ_LOG(4,(THIS_FILE, "STUN parsing error ignored: %s",
                              pj_strerror(status, errmsg, sizeof(errmsg)).ptr));

                    /* Also ignore non-successful parsing. This may not
                     * be a STUN response at all. See the comment above.
                     */
                    status = PJ_SUCCESS;
                    continue;
                }

                sock_idx = pj_ntohl(msg.hdr->tsx[2]);
                srv_idx = pj_ntohl(msg.hdr->tsx[3]);

                if (sock_idx<0 || sock_idx>=sock_cnt || sock_idx!=i ||
                    srv_idx<0 || srv_idx>=2)
                {
                    status = PJLIB_UTIL_ESTUNININDEX;
                    continue;
                }

                if (pj_ntohs(msg.hdr->type) != PJSTUN_BINDING_RESPONSE) {
                    status = PJLIB_UTIL_ESTUNNOBINDRES;
                    continue;
                }

                if (rec[sock_idx].srv[srv_idx].mapped_port != 0) {
                    /* Already got response */
                    continue;
                }

                /* From this point on, we consider the packet to be a valid
                 * STUN response to our request.
                 */
                --wait_resp;

                if (pjstun_msg_find_attr(&msg, PJSTUN_ATTR_ERROR_CODE) != NULL) {
                    status = PJLIB_UTIL_ESTUNRECVERRATTR;
                    continue;
                }

                attr = (pjstun_mapped_addr_attr*)
                       pjstun_msg_find_attr(&msg, PJSTUN_ATTR_MAPPED_ADDR);
                if (!attr) {
                    attr = (pjstun_mapped_addr_attr*)
                           pjstun_msg_find_attr(&msg, PJSTUN_ATTR_XOR_MAPPED_ADDR);
                    if (!attr || attr->family != 1) {
                        status = PJLIB_UTIL_ESTUNNOMAP;
                        continue;
                    }
                }

                rec[sock_idx].srv[srv_idx].mapped_addr = attr->addr;
                rec[sock_idx].srv[srv_idx].mapped_port = attr->port;
                if (pj_ntohs(attr->hdr.type) == PJSTUN_ATTR_XOR_MAPPED_ADDR) {
                    rec[sock_idx].srv[srv_idx].mapped_addr ^= pj_htonl(STUN_MAGIC);
                    rec[sock_idx].srv[srv_idx].mapped_port ^=
                        pj_htons(STUN_MAGIC >> 16);
                }
            }
        }

        /* The best scenario is if all requests have been replied.
         * Then we don't need to go to the next retransmission iteration.
         */
        if (wait_resp <= 0)
            break;
    }
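/* A minimal caller sketch (an assumption, not part of the excerpts above)
 * showing how pjstun_get_mapped_addr() might be invoked to learn the public
 * address of a single UDP socket against two STUN servers. The server names
 * and ports are illustrative.
 */
static pj_status_t resolve_public_addr(pj_pool_factory *pf, pj_sock_t sock)
{
    pj_str_t srv1 = pj_str("stun1.example.com");
    pj_str_t srv2 = pj_str("stun2.example.com");
    pj_sockaddr_in mapped;
    pj_status_t status;

    /* One socket, two servers; the function fills mapped[] on success. */
    status = pjstun_get_mapped_addr(pf, 1, &sock,
                                    &srv1, 3478, &srv2, 3478, &mapped);
    if (status == PJ_SUCCESS) {
        PJ_LOG(3,("sample", "Mapped address: %s:%d",
                  pj_inet_ntoa(mapped.sin_addr),
                  (int)pj_ntohs(mapped.sin_port)));
    }
    return status;
}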
static int send_recv_test(int sock_type,
                          pj_sock_t ss, pj_sock_t cs,
                          pj_sockaddr_in *dstaddr, pj_sockaddr_in *srcaddr,
                          int addrlen)
{
    enum { DATA_LEN = 16 };
    char senddata[DATA_LEN+4], recvdata[DATA_LEN+4];
    pj_ssize_t sent, received, total_received;
    pj_status_t rc;

    TRACE_(("test", "....create_random_string()"));
    pj_create_random_string(senddata, DATA_LEN);
    senddata[DATA_LEN-1] = '\0';

    /*
     * Test send/recv small data.
     */
    TRACE_(("test", "....sendto()"));
    if (dstaddr) {
        sent = DATA_LEN;
        rc = pj_sock_sendto(cs, senddata, &sent, 0, dstaddr, addrlen);
        if (rc != PJ_SUCCESS || sent != DATA_LEN) {
            app_perror("...sendto error", rc);
            rc = -140;
            goto on_error;
        }
    } else {
        sent = DATA_LEN;
        rc = pj_sock_send(cs, senddata, &sent, 0);
        if (rc != PJ_SUCCESS || sent != DATA_LEN) {
            app_perror("...send error", rc);
            rc = -145;
            goto on_error;
        }
    }

    TRACE_(("test", "....recv()"));
    if (srcaddr) {
        pj_sockaddr_in addr;
        int srclen = sizeof(addr);

        pj_bzero(&addr, sizeof(addr));

        received = DATA_LEN;
        rc = pj_sock_recvfrom(ss, recvdata, &received, 0, &addr, &srclen);
        if (rc != PJ_SUCCESS || received != DATA_LEN) {
            app_perror("...recvfrom error", rc);
            rc = -150;
            goto on_error;
        }
        if (srclen != addrlen)
            return -151;
        if (pj_sockaddr_cmp(&addr, srcaddr) != 0) {
            char srcaddr_str[32], addr_str[32];
            strcpy(srcaddr_str, pj_inet_ntoa(srcaddr->sin_addr));
            strcpy(addr_str, pj_inet_ntoa(addr.sin_addr));
            PJ_LOG(3,("test", "...error: src address mismatch (original=%s, "
                              "recvfrom addr=%s)",
                              srcaddr_str, addr_str));
            return -152;
        }

    } else {
        /* Repeat recv() until all data is received.
         * This applies only for non-UDP of course, since for UDP
         * we would expect all data to be received in one packet.
         */
        total_received = 0;
        do {
            received = DATA_LEN-total_received;
            rc = pj_sock_recv(ss, recvdata+total_received, &received, 0);
            if (rc != PJ_SUCCESS) {
                app_perror("...recv error", rc);
                rc = -155;
                goto on_error;
            }
            if (received <= 0) {
                PJ_LOG(3,("", "...error: socket has closed! (received=%d)",
                          received));
                rc = -156;
                goto on_error;
            }
            if (received != DATA_LEN-total_received) {
                if (sock_type != pj_SOCK_STREAM()) {
                    PJ_LOG(3,("", "...error: expecting %u bytes, got %u bytes",
                              DATA_LEN-total_received, received));
                    rc = -157;
                    goto on_error;
                }
            }
            total_received += received;
        } while (total_received < DATA_LEN);
    }

    TRACE_(("test", "....memcmp()"));
    if (pj_memcmp(senddata, recvdata, DATA_LEN) != 0) {
        PJ_LOG(3,("", "...error: received data mismatch "
                      "(got:'%s' expecting:'%s')",
                      recvdata, senddata));
        rc = -160;
        goto on_error;
    }

    /*
     * Test send/recv big data.
     */
    TRACE_(("test", "....sendto()"));
    if (dstaddr) {
        sent = BIG_DATA_LEN;
        rc = pj_sock_sendto(cs, bigdata, &sent, 0, dstaddr, addrlen);
        if (rc != PJ_SUCCESS || sent != BIG_DATA_LEN) {
            app_perror("...sendto error", rc);
            rc = -161;
            goto on_error;
        }
    } else {
        sent = BIG_DATA_LEN;
        rc = pj_sock_send(cs, bigdata, &sent, 0);
        if (rc != PJ_SUCCESS || sent != BIG_DATA_LEN) {
            app_perror("...send error", rc);
            rc = -165;
            goto on_error;
        }
    }

    TRACE_(("test", "....recv()"));

    /* Repeat recv() until all data is received.
     * This applies only for non-UDP of course, since for UDP
     * we would expect all data to be received in one packet.
     */
    total_received = 0;
    do {
        received = BIG_DATA_LEN-total_received;
        rc = pj_sock_recv(ss, bigbuffer+total_received, &received, 0);
        if (rc != PJ_SUCCESS) {
            app_perror("...recv error", rc);
            rc = -170;
            goto on_error;
        }
        if (received <= 0) {
            PJ_LOG(3,("", "...error: socket has closed! (received=%d)",
                      received));
            rc = -173;
            goto on_error;
        }
        if (received != BIG_DATA_LEN-total_received) {
            if (sock_type != pj_SOCK_STREAM()) {
                PJ_LOG(3,("", "...error: expecting %u bytes, got %u bytes",
                          BIG_DATA_LEN-total_received, received));
                rc = -176;
                goto on_error;
            }
        }
        total_received += received;
    } while (total_received < BIG_DATA_LEN);

    TRACE_(("test", "....memcmp()"));
    if (pj_memcmp(bigdata, bigbuffer, BIG_DATA_LEN) != 0) {
        PJ_LOG(3,("", "...error: received data has been altered!"));
        rc = -180;
        goto on_error;
    }

    rc = 0;

on_error:
    return rc;
}
/*
 * pj_ioqueue_recvfrom()
 *
 * Start asynchronous recvfrom() from the socket.
 */
PJ_DEF(pj_status_t) pj_ioqueue_recvfrom( pj_ioqueue_key_t *key,
                                         pj_ioqueue_op_key_t *op_key,
                                         void *buffer,
                                         pj_ssize_t *length,
                                         unsigned flags,
                                         pj_sockaddr_t *addr,
                                         int *addrlen)
{
    struct read_operation *read_op;

    PJ_ASSERT_RETURN(key && op_key && buffer && length, PJ_EINVAL);
    PJ_CHECK_STACK();

    /* Check if key is closing. */
    if (IS_CLOSING(key))
        return PJ_ECANCELLED;

    read_op = (struct read_operation*)op_key;
    read_op->op = PJ_IOQUEUE_OP_NONE;

    /* Try to see if there's data immediately available. */
    if ((flags & PJ_IOQUEUE_ALWAYS_ASYNC) == 0) {
        pj_status_t status;
        pj_ssize_t size;

        size = *length;
        status = pj_sock_recvfrom(key->fd, buffer, &size, flags,
                                  addr, addrlen);
        if (status == PJ_SUCCESS) {
            /* Yes! Data is available! */
            *length = size;
            return PJ_SUCCESS;
        } else {
            /* If error is not EWOULDBLOCK (or EAGAIN on Linux), report
             * the error to caller.
             */
            if (status != PJ_STATUS_FROM_OS(PJ_BLOCKING_ERROR_VAL))
                return status;
        }
    }

    flags &= ~(PJ_IOQUEUE_ALWAYS_ASYNC);

    /*
     * No data is immediately available.
     * Must schedule asynchronous operation to the ioqueue.
     */
    read_op->op = PJ_IOQUEUE_OP_RECV_FROM;
    read_op->buf = buffer;
    read_op->size = *length;
    read_op->flags = flags;
    read_op->rmt_addr = addr;
    read_op->rmt_addrlen = addrlen;

    pj_mutex_lock(key->mutex);
    pj_list_insert_before(&key->read_list, read_op);
    ioqueue_add_to_set(key->ioqueue, key, READABLE_EVENT);
    pj_mutex_unlock(key->mutex);

    return PJ_EPENDING;
}
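/* A minimal, hedged sketch of how a caller typically drives the function
 * above: post a recvfrom() and handle both the synchronous (PJ_SUCCESS)
 * and asynchronous (PJ_EPENDING, later completed via on_read_complete)
 * paths. The callback is assumed to have been registered earlier with
 * pj_ioqueue_register_sock(); buffer sizes and names are illustrative.
 */
static char rx_buf[1500];
static pj_sockaddr_in rx_src;
static int rx_src_len;

static void on_read_complete(pj_ioqueue_key_t *key,
                             pj_ioqueue_op_key_t *op_key,
                             pj_ssize_t bytes_read)
{
    /* A negative bytes_read carries -(pj_status_t), as produced by the
     * ioqueue dispatcher on error.
     */
    PJ_UNUSED_ARG(key);
    PJ_UNUSED_ARG(op_key);
    PJ_LOG(4,("sample", "async recvfrom completed, bytes=%ld",
              (long)bytes_read));
}

static pj_status_t post_recvfrom(pj_ioqueue_key_t *key,
                                 pj_ioqueue_op_key_t *op_key)
{
    pj_ssize_t len = sizeof(rx_buf);
    pj_status_t status;

    rx_src_len = sizeof(rx_src);
    status = pj_ioqueue_recvfrom(key, op_key, rx_buf, &len, 0,
                                 &rx_src, &rx_src_len);
    if (status == PJ_SUCCESS) {
        /* Data was already available; 'len' holds the packet size. */
        PJ_LOG(4,("sample", "immediate recvfrom, bytes=%ld", (long)len));
    } else if (status != PJ_EPENDING) {
        /* Real error; PJ_EPENDING means on_read_complete() fires later. */
        return status;
    }
    return PJ_SUCCESS;
}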
void ioqueue_dispatch_read_event( pj_ioqueue_t *ioqueue, pj_ioqueue_key_t *h )
{
    pj_status_t rc;

    /* Lock the key. */
    pj_mutex_lock(h->mutex);

    if (IS_CLOSING(h)) {
        pj_mutex_unlock(h->mutex);
        return;
    }

#   if PJ_HAS_TCP
    if (!pj_list_empty(&h->accept_list)) {

        struct accept_operation *accept_op;
        pj_bool_t has_lock;

        /* Get one accept operation from the list. */
        accept_op = h->accept_list.next;
        pj_list_erase(accept_op);
        accept_op->op = PJ_IOQUEUE_OP_NONE;

        /* Clear bit in fdset if there is no more pending accept */
        if (pj_list_empty(&h->accept_list))
            ioqueue_remove_from_set(ioqueue, h, READABLE_EVENT);

        rc=pj_sock_accept(h->fd, accept_op->accept_fd,
                          accept_op->rmt_addr, accept_op->addrlen);
        if (rc==PJ_SUCCESS && accept_op->local_addr) {
            rc = pj_sock_getsockname(*accept_op->accept_fd,
                                     accept_op->local_addr,
                                     accept_op->addrlen);
        }

        /* Unlock; from this point we don't need to hold key's mutex
         * (unless concurrency is disabled, in which case we should
         * hold the mutex while calling the callback).
         */
        if (h->allow_concurrent) {
            /* concurrency may be changed while we're in the callback, so
             * save it to a flag.
             */
            has_lock = PJ_FALSE;
            pj_mutex_unlock(h->mutex);
        } else {
            has_lock = PJ_TRUE;
        }

        /* Call callback. */
        if (h->cb.on_accept_complete && !IS_CLOSING(h)) {
            (*h->cb.on_accept_complete)(h,
                                        (pj_ioqueue_op_key_t*)accept_op,
                                        *accept_op->accept_fd, rc);
        }

        if (has_lock) {
            pj_mutex_unlock(h->mutex);
        }
    }
    else
#   endif
    if (key_has_pending_read(h)) {
        struct read_operation *read_op;
        pj_ssize_t bytes_read;
        pj_bool_t has_lock;

        /* Get one pending read operation from the list. */
        read_op = h->read_list.next;
        pj_list_erase(read_op);

        /* Clear fdset if there is no pending read. */
        if (pj_list_empty(&h->read_list))
            ioqueue_remove_from_set(ioqueue, h, READABLE_EVENT);

        bytes_read = read_op->size;

        if ((read_op->op == PJ_IOQUEUE_OP_RECV_FROM)) {
            read_op->op = PJ_IOQUEUE_OP_NONE;
            rc = pj_sock_recvfrom(h->fd, read_op->buf, &bytes_read,
                                  read_op->flags,
                                  read_op->rmt_addr,
                                  read_op->rmt_addrlen);
        } else if ((read_op->op == PJ_IOQUEUE_OP_RECV)) {
            read_op->op = PJ_IOQUEUE_OP_NONE;
            rc = pj_sock_recv(h->fd, read_op->buf, &bytes_read,
                              read_op->flags);
        } else {
            pj_assert(read_op->op == PJ_IOQUEUE_OP_READ);
            read_op->op = PJ_IOQUEUE_OP_NONE;
            /*
             * User has specified pj_ioqueue_read().
             * On Win32, we should do ReadFile(). But because we got
             * here because of select() anyway, user must have put a
             * socket descriptor on h->fd, which in this case we can
             * just call pj_sock_recv() instead of ReadFile().
             * On Unix, user may put a file in h->fd, so we'll have
             * to call read() here.
             * This may not compile on systems which don't have
             * read(). That's why we only specify PJ_LINUX here so
             * that the error is easier to catch.
             */
#           if defined(PJ_WIN32) && PJ_WIN32 != 0 || \
               defined(PJ_WIN32_WINCE) && PJ_WIN32_WINCE != 0
                rc = pj_sock_recv(h->fd, read_op->buf, &bytes_read,
                                  read_op->flags);
                //rc = ReadFile((HANDLE)h->fd, read_op->buf, read_op->size,
                //              &bytes_read, NULL);
#           elif (defined(PJ_HAS_UNISTD_H) && PJ_HAS_UNISTD_H != 0)
                bytes_read = read(h->fd, read_op->buf, bytes_read);
                rc = (bytes_read >= 0) ? PJ_SUCCESS : pj_get_os_error();
#           elif defined(PJ_LINUX_KERNEL) && PJ_LINUX_KERNEL != 0
                bytes_read = sys_read(h->fd, read_op->buf, bytes_read);
                rc = (bytes_read >= 0) ? PJ_SUCCESS : -bytes_read;
#           else
#               error "Implement read() for this platform!"
#           endif
        }

        if (rc != PJ_SUCCESS) {
#           if defined(PJ_WIN32) && PJ_WIN32 != 0
            /* On Win32, for UDP, WSAECONNRESET on the receive side
             * indicates that previous sending has triggered ICMP Port
             * Unreachable message.
             * But we wouldn't know at this point which one of the previous
             * keys has triggered the error, since the UDP socket can
             * be shared!
             * So we'll just ignore it!
             */
            if (rc == PJ_STATUS_FROM_OS(WSAECONNRESET)) {
                //PJ_LOG(4,(THIS_FILE,
                //          "Ignored ICMP port unreach. on key=%p", h));
            }
#           endif

            /* In any case we would report this to caller. */
            bytes_read = -rc;
        }

        /* Unlock; from this point we don't need to hold key's mutex
         * (unless concurrency is disabled, in which case we should
         * hold the mutex while calling the callback).
         */
        if (h->allow_concurrent) {
            /* concurrency may be changed while we're in the callback, so
             * save it to a flag.
             */
            has_lock = PJ_FALSE;
            pj_mutex_unlock(h->mutex);
        } else {
            has_lock = PJ_TRUE;
        }

        /* Call callback. */
        if (h->cb.on_read_complete && !IS_CLOSING(h)) {
            (*h->cb.on_read_complete)(h,
                                      (pj_ioqueue_op_key_t*)read_op,
                                      bytes_read);
        }

        if (has_lock) {
            pj_mutex_unlock(h->mutex);
        }

    } else {
        /*
         * This is normal; execution may fall here when multiple threads
         * are signalled for the same event, but only one thread is
         * eventually able to process the event.
         */
        pj_mutex_unlock(h->mutex);
    }
}
static int thread_proc(void *data)
{
    dupsock_t *p_dupsock = (dupsock_t *)data;
    pj_thread_t *thread;
    pj_thread_desc desc;
    struct pj_time_val tv;
    int ret;
    pj_ssize_t ntemp;
    int sock_len;

    // pj_bzero(desc, sizeof(desc));
    // CHECK(__FILE__, pj_thread_register("dupsock", desc, &thread));

    PJ_FD_ZERO(&(p_dupsock->rfds));
    PJ_FD_ZERO(&(p_dupsock->wfds));
    PJ_FD_SET(*(p_dupsock->p_sock), &(p_dupsock->rfds));

    tv.sec = 0;
    tv.msec = 20;

    // MAIN LOOP
    p_dupsock->b_quit = 0;
    while ( (!p_dupsock->b_quit) || (p_dupsock->wait_cnt > 0) ||
            (p_dupsock->to_send != NULL) )
    {
        if (p_dupsock->wait_cnt > 0) {
            p_dupsock->wait_cnt--;
            pj_event_pulse(p_dupsock->p_event);
        }

        pj_thread_sleep(10);

        PJ_FD_ZERO(&(p_dupsock->rfds));
        PJ_FD_ZERO(&(p_dupsock->wfds));
        PJ_FD_SET(*(p_dupsock->p_sock), &(p_dupsock->rfds));
        if (p_dupsock->to_send != NULL)
            PJ_FD_SET(*(p_dupsock->p_sock), &(p_dupsock->wfds));

        ret = pj_sock_select(*(p_dupsock->p_sock) + 1,
                             &(p_dupsock->rfds), &(p_dupsock->wfds),
                             NULL, &tv);
        if (ret < 0) {
            PJ_LOG(2, (__FILE__, "Error in select"));
        } else if (ret > 0) {
            if (PJ_FD_ISSET(*(p_dupsock->p_sock), &(p_dupsock->rfds))) {
                ntemp = sizeof(p_dupsock->in_buffer);
                sock_len = sizeof(p_dupsock->in_packet.addr);
                pj_sock_recvfrom(*(p_dupsock->p_sock),
                                 p_dupsock->in_packet.data, &ntemp, 0,
                                 (pj_sockaddr_t *)(&(p_dupsock->in_packet.addr)),
                                 &sock_len);
                p_dupsock->in_packet.len = ntemp;

                if (p_dupsock->recv_callback != NULL) {
                    p_dupsock->recv_callback(p_dupsock);
                }
                p_dupsock->in_packet.len = 0;
            }

            if (PJ_FD_ISSET(*(p_dupsock->p_sock), &(p_dupsock->wfds))) {
                ntemp = p_dupsock->to_send->len - p_dupsock->to_send->sent;
                pj_sock_sendto(*(p_dupsock->p_sock),
                               p_dupsock->to_send->data + p_dupsock->to_send->sent,
                               &ntemp, 0,
                               (pj_sockaddr_t *)(&(p_dupsock->to_send->addr)),
                               sizeof(p_dupsock->to_send->addr));
                p_dupsock->to_send->sent += ntemp;

                if (p_dupsock->to_send->len == p_dupsock->to_send->sent) {
                    if (p_dupsock->send_callback != NULL) {
                        p_dupsock->send_callback(p_dupsock);
                    }
                    p_dupsock->to_send = NULL;
                    PJ_FD_CLR(*(p_dupsock->p_sock), &(p_dupsock->wfds));
                }
            }
        }

        //PJ_LOG(5, (__FILE__, "end of a loop"));
    }

    pj_event_destroy(p_dupsock->p_event);
    pj_sock_close(*(p_dupsock->p_sock));

    return 0;
}
PJ_DECL(pj_status_t) pjstun_get_mapped_addr( pj_pool_factory *pf,
                                             int sock_cnt, pj_sock_t sock[],
                                             const pj_str_t *srv1, int port1,
                                             const pj_str_t *srv2, int port2,
                                             pj_sockaddr_in mapped_addr[])
{
    pj_sockaddr_in srv_addr[2];
    int i, j, send_cnt = 0;
    pj_pool_t *pool;
    struct {
        struct {
            pj_uint32_t mapped_addr;
            pj_uint32_t mapped_port;
        } srv[2];
    } *rec;
    void *out_msg;
    pj_size_t out_msg_len;
    int wait_resp = 0;
    pj_status_t status;

    PJ_CHECK_STACK();

    /* Create pool. */
    pool = pj_pool_create(pf, "stun%p", 1024, 1024, NULL);
    if (!pool)
        return PJ_ENOMEM;

    /* Allocate client records */
    rec = pj_pool_calloc(pool, sock_cnt, sizeof(*rec));
    if (!rec) {
        status = PJ_ENOMEM;
        goto on_error;
    }

    /* Create the outgoing BIND REQUEST message template */
    status = pjstun_create_bind_req( pool, &out_msg, &out_msg_len,
                                     pj_rand(), pj_rand());
    if (status != PJ_SUCCESS)
        goto on_error;

    /* Resolve servers. */
    status = pj_sockaddr_in_init(&srv_addr[0], srv1, (pj_uint16_t)port1);
    if (status != PJ_SUCCESS)
        goto on_error;

    status = pj_sockaddr_in_init(&srv_addr[1], srv2, (pj_uint16_t)port2);
    if (status != PJ_SUCCESS)
        goto on_error;

    /* Init mapped addresses to zero */
    pj_memset(mapped_addr, 0, sock_cnt * sizeof(pj_sockaddr_in));

    /* Main retransmission loop. */
    for (send_cnt=0; send_cnt<MAX_REQUEST; ++send_cnt) {
        pj_time_val next_tx, now;
        pj_fd_set_t r;
        int select_rc;

        PJ_FD_ZERO(&r);

        /* Send messages to servers that have not given us a response. */
        for (i=0; i<sock_cnt && status==PJ_SUCCESS; ++i) {
            for (j=0; j<2 && status==PJ_SUCCESS; ++j) {
                pjstun_msg_hdr *msg_hdr = out_msg;
                pj_ssize_t sent_len;

                if (rec[i].srv[j].mapped_port != 0)
                    continue;

                /* Modify message so that we can distinguish response. */
                msg_hdr->tsx[2] = pj_htonl(i);
                msg_hdr->tsx[3] = pj_htonl(j);

                /* Send! */
                sent_len = out_msg_len;
                status = pj_sock_sendto(sock[i], out_msg, &sent_len, 0,
                                        (pj_sockaddr_t*)&srv_addr[j],
                                        sizeof(pj_sockaddr_in));
                if (status == PJ_SUCCESS)
                    ++wait_resp;
            }
        }

        /* All requests sent.
         * The loop below will wait for responses until all responses have
         * been received (i.e. wait_resp==0) or timeout occurs, which then
         * we'll go to the next retransmission iteration.
         */

        /* Calculate time of next retransmission. */
        pj_gettimeofday(&next_tx);
        next_tx.sec += (stun_timer[send_cnt]/1000);
        next_tx.msec += (stun_timer[send_cnt]%1000);
        pj_time_val_normalize(&next_tx);

        for (pj_gettimeofday(&now), select_rc=1;
             status==PJ_SUCCESS && select_rc==1 && wait_resp>0 &&
             PJ_TIME_VAL_LT(now, next_tx);
             pj_gettimeofday(&now))
        {
            pj_time_val timeout;

            timeout = next_tx;
            PJ_TIME_VAL_SUB(timeout, now);

            for (i=0; i<sock_cnt; ++i) {
                PJ_FD_SET(sock[i], &r);
            }

            select_rc = pj_sock_select(FD_SETSIZE, &r, NULL, NULL, &timeout);
            if (select_rc < 1)
                continue;

            for (i=0; i<sock_cnt; ++i) {
                int sock_idx, srv_idx;
                pj_ssize_t len;
                pjstun_msg msg;
                pj_sockaddr_in addr;
                int addrlen = sizeof(addr);
                pjstun_mapped_addr_attr *attr;
                char recv_buf[128];

                if (!PJ_FD_ISSET(sock[i], &r))
                    continue;

                len = sizeof(recv_buf);
                status = pj_sock_recvfrom( sock[i], recv_buf, &len, 0,
                                           (pj_sockaddr_t*)&addr, &addrlen);

                --wait_resp;

                if (status != PJ_SUCCESS)
                    continue;

                status = pjstun_parse_msg(recv_buf, len, &msg);
                if (status != PJ_SUCCESS) {
                    continue;
                }

                sock_idx = pj_ntohl(msg.hdr->tsx[2]);
                srv_idx = pj_ntohl(msg.hdr->tsx[3]);

                if (sock_idx<0 || sock_idx>=sock_cnt ||
                    srv_idx<0 || srv_idx>=2)
                {
                    status = PJLIB_UTIL_ESTUNININDEX;
                    continue;
                }

                if (pj_ntohs(msg.hdr->type) != PJSTUN_BINDING_RESPONSE) {
                    status = PJLIB_UTIL_ESTUNNOBINDRES;
                    continue;
                }

                if (pjstun_msg_find_attr(&msg, PJSTUN_ATTR_ERROR_CODE) != NULL) {
                    status = PJLIB_UTIL_ESTUNRECVERRATTR;
                    continue;
                }

                attr = (void*)pjstun_msg_find_attr(&msg, PJSTUN_ATTR_MAPPED_ADDR);
                if (!attr) {
                    status = PJLIB_UTIL_ESTUNNOMAP;
                    continue;
                }

                rec[sock_idx].srv[srv_idx].mapped_addr = attr->addr;
                rec[sock_idx].srv[srv_idx].mapped_port = attr->port;
            }
        }

        /* The best scenario is if all requests have been replied.
         * Then we don't need to go to the next retransmission iteration.
         */
        if (wait_resp <= 0)
            break;
    }

    for (i=0; i<sock_cnt && status==PJ_SUCCESS; ++i) {
        if (rec[i].srv[0].mapped_addr == rec[i].srv[1].mapped_addr &&
            rec[i].srv[0].mapped_port == rec[i].srv[1].mapped_port)
        {
            mapped_addr[i].sin_family = PJ_AF_INET;
            mapped_addr[i].sin_addr.s_addr = rec[i].srv[0].mapped_addr;
            mapped_addr[i].sin_port = (pj_uint16_t)rec[i].srv[0].mapped_port;

            if (rec[i].srv[0].mapped_addr == 0 ||
                rec[i].srv[0].mapped_port == 0)
            {
                status = PJLIB_UTIL_ESTUNNOTRESPOND;
                break;
            }
        } else {
            status = PJLIB_UTIL_ESTUNSYMMETRIC;
            break;
        }
    }

    pj_pool_release(pool);
    return status;

on_error:
    if (pool)
        pj_pool_release(pool);
    return status;
}
PJ_DEF(int) pj_ioqueue_poll( pj_ioqueue_t *ioque, const pj_time_val *timeout)
{
    pj_fdset_t rfdset, wfdset, xfdset;
    int rc;
    pj_ioqueue_key_t *h;

    /* Copy ioqueue's fd_set to local variables. */
    pj_mutex_lock(ioque->mutex);

    rfdset = ioque->rfdset;
    wfdset = ioque->wfdset;
#if PJ_HAS_TCP
    xfdset = ioque->xfdset;
#else
    PJ_FD_ZERO(&xfdset);
#endif

    /* Unlock ioqueue before select(). */
    pj_mutex_unlock(ioque->mutex);

    rc = pj_sock_select(FD_SETSIZE, &rfdset, &wfdset, &xfdset, timeout);

    if (rc <= 0)
        return rc;

    /* Lock ioqueue again before scanning for signalled sockets. */
    pj_mutex_lock(ioque->mutex);

#if PJ_HAS_TCP
    /* Scan for exception socket */
    h = ioque->hlist.next;
    for ( ; h!=&ioque->hlist; h = h->next) {
        if ((h->op & PJ_IOQUEUE_OP_CONNECT) && PJ_FD_ISSET(h->fd, &xfdset))
            break;
    }
    if (h != &ioque->hlist) {
        /* 'connect()' should be the only operation. */
        pj_assert((h->op == PJ_IOQUEUE_OP_CONNECT));

        /* Clear operation. */
        h->op &= ~(PJ_IOQUEUE_OP_CONNECT);
        PJ_FD_CLR(h->fd, &ioque->wfdset);
        PJ_FD_CLR(h->fd, &ioque->xfdset);

        /* Unlock I/O queue before calling callback. */
        pj_mutex_unlock(ioque->mutex);

        /* Call callback. */
        (*h->cb.on_connect_complete)(h, -1);
        return 1;
    }
#endif  /* PJ_HAS_TCP */

    /* Scan for writable socket */
    h = ioque->hlist.next;
    for ( ; h!=&ioque->hlist; h = h->next) {
        if ((PJ_IOQUEUE_IS_WRITE_OP(h->op) ||
             PJ_IOQUEUE_IS_CONNECT_OP(h->op)) &&
            PJ_FD_ISSET(h->fd, &wfdset))
        {
            break;
        }
    }
    if (h != &ioque->hlist) {
        pj_assert(PJ_IOQUEUE_IS_WRITE_OP(h->op) ||
                  PJ_IOQUEUE_IS_CONNECT_OP(h->op));

#if PJ_HAS_TCP
        if ((h->op & PJ_IOQUEUE_OP_CONNECT)) {
            /* Completion of connect() operation */
            pj_ssize_t bytes_transfered;

#if defined(PJ_LINUX)
            /* from connect(2):
             * On Linux, use getsockopt to read the SO_ERROR option at
             * level SOL_SOCKET to determine whether connect() completed
             * successfully (if SO_ERROR is zero).
             */
            int value;
            socklen_t vallen = sizeof(value);
            int rc = getsockopt(h->fd, SOL_SOCKET, SO_ERROR,
                                &value, &vallen);
            if (rc != 0) {
                /* Argh!! What to do now???
                 * Just indicate that the socket is connected. The
                 * application will get an error as soon as it tries to
                 * use the socket to send/receive.
                 */
                PJ_PERROR(("ioqueue",
                           "Unable to determine connect() status"));
                bytes_transfered = 0;
            } else {
                bytes_transfered = value;
            }
#elif defined(PJ_WIN32)
            bytes_transfered = 0; /* success */
#else
#   error "Got to check this one!"
#endif

            /* Clear operation. */
            h->op &= (~PJ_IOQUEUE_OP_CONNECT);
            PJ_FD_CLR(h->fd, &ioque->wfdset);
            PJ_FD_CLR(h->fd, &ioque->xfdset);

            /* Unlock mutex before calling callback. */
            pj_mutex_unlock(ioque->mutex);

            /* Call callback. */
            (*h->cb.on_connect_complete)(h, bytes_transfered);

            return 1;

        } else
#endif /* PJ_HAS_TCP */
        {
            /* Completion of write(), send(), or sendto() operation. */

            /* Clear operation. */
            h->op &= ~(PJ_IOQUEUE_OP_WRITE | PJ_IOQUEUE_OP_SEND_TO);
            PJ_FD_CLR(h->fd, &ioque->wfdset);

            /* Unlock mutex before calling callback. */
            pj_mutex_unlock(ioque->mutex);

            /* Call callback. */
            /* All data must have been sent? */
            (*h->cb.on_write_complete)(h, h->wr_buflen);

            return 1;
        }

        /* Unreached. */
    }

    /* Scan for readable socket. */
    h = ioque->hlist.next;
    for ( ; h!=&ioque->hlist; h = h->next) {
        if ((PJ_IOQUEUE_IS_READ_OP(h->op) ||
             PJ_IOQUEUE_IS_ACCEPT_OP(h->op)) &&
            PJ_FD_ISSET(h->fd, &rfdset))
        {
            break;
        }
    }
    if (h != &ioque->hlist) {
        pj_assert(PJ_IOQUEUE_IS_READ_OP(h->op) ||
                  PJ_IOQUEUE_IS_ACCEPT_OP(h->op));

#if PJ_HAS_TCP
        if ((h->op & PJ_IOQUEUE_OP_ACCEPT)) {
            /* accept() must be the only operation specified on a
             * server socket.
             */
            pj_assert(h->op == PJ_IOQUEUE_OP_ACCEPT);

            *h->accept_fd = pj_sock_accept(h->fd, h->rmt_addr,
                                           h->rmt_addrlen);
            if (*h->accept_fd == PJ_INVALID_SOCKET) {
                rc = -1;
            } else if (h->local_addr) {
                rc = pj_sock_getsockname(*h->accept_fd, h->local_addr,
                                         h->local_addrlen);
            } else {
                rc = 0;
            }

            h->op &= ~(PJ_IOQUEUE_OP_ACCEPT);
            PJ_FD_CLR(h->fd, &ioque->rfdset);

            /* Unlock mutex before calling callback. */
            pj_mutex_unlock(ioque->mutex);

            /* Call callback. */
            (*h->cb.on_accept_complete)(h, rc);

            return 1;
        } else
#endif
        if ((h->op & PJ_IOQUEUE_OP_RECV_FROM)) {
            rc = pj_sock_recvfrom(h->fd, h->rd_buf, h->rd_buflen, 0,
                                  h->rmt_addr, h->rmt_addrlen);
        } else {
            rc = pj_sock_recv(h->fd, h->rd_buf, h->rd_buflen, 0);
        }

        if (rc < 0) {
            pj_status_t sock_err = -1;
#if defined(_WIN32)
            /* On Win32, for UDP, WSAECONNRESET on the receive side
             * indicates that previous sending has triggered ICMP Port
             * Unreachable message.
             * But we wouldn't know at this point which one of the previous
             * keys has triggered the error, since the UDP socket can
             * be shared!
             * So we'll just ignore it!
             */
            sock_err = pj_sock_getlasterror();
            if (sock_err == PJ_ECONNRESET) {
                pj_mutex_unlock(ioque->mutex);
                PJ_LOG(4,(THIS_FILE,
                          "Received ICMP port unreachable on key=%p (ignored)!",
                          h));
                return 0;
            }
#endif
            PJ_LOG(4, (THIS_FILE,
                       "socket recv error on key %p, rc=%d, err=%d",
                       h, rc, sock_err));
        }

        h->op &= ~(PJ_IOQUEUE_OP_READ | PJ_IOQUEUE_OP_RECV_FROM);
        PJ_FD_CLR(h->fd, &ioque->rfdset);

        /* Unlock mutex before callback. */
        pj_mutex_unlock(ioque->mutex);

        /* Call callback. */
        (*h->cb.on_read_complete)(h, rc);

        return 1;
    }

    /* Shouldn't happen. */
    /* For some strange reason, on WinXP select() can return 1 while there
     * is no fd_set signaled.
     */
    /* pj_assert(0); */
    rc = 0;

    pj_mutex_unlock(ioque->mutex);
    return rc;
}
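/* A brief, hedged sketch of the kind of polling loop that drives
 * pj_ioqueue_poll() above; the quit flag and the 10 ms timeout are
 * illustrative assumptions.
 */
static int ioqueue_poll_loop(pj_ioqueue_t *ioque, volatile pj_bool_t *quit)
{
    int events = 0;

    while (!*quit) {
        pj_time_val timeout = {0, 10};
        int rc;

        /* Returns the number of events dispatched, zero on timeout,
         * or a negative value on error.
         */
        rc = pj_ioqueue_poll(ioque, &timeout);
        if (rc > 0)
            events += rc;
    }
    return events;
}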