/*
 * Schedule the transaction to be destroyed after the specified delay.
 * Any previously scheduled destroy timer and any pending retransmission
 * are cancelled first. On success the on_complete callback is cleared so
 * that no completion event fires after destruction has been scheduled.
 */
PJ_DEF(pj_status_t) pj_stun_client_tsx_schedule_destroy(
                                        pj_stun_client_tsx *tsx,
                                        const pj_time_val *delay)
{
    pj_status_t status;

    PJ_ASSERT_RETURN(tsx && delay, PJ_EINVAL);
    PJ_ASSERT_RETURN(tsx->cb.on_destroy, PJ_EINVAL);

    pj_grp_lock_acquire(tsx->grp_lock);

    /* Cancel previously registered timer */
    pj_timer_heap_cancel_if_active(tsx->timer_heap, &tsx->destroy_timer,
                                   TIMER_INACTIVE);

    /* Stop retransmission, just in case */
    pj_timer_heap_cancel_if_active(tsx->timer_heap, &tsx->retransmit_timer,
                                   TIMER_INACTIVE);

    status = pj_timer_heap_schedule_w_grp_lock(tsx->timer_heap,
                                               &tsx->destroy_timer,
                                               delay, TIMER_ACTIVE,
                                               tsx->grp_lock);
    if (status == PJ_SUCCESS) {
        /* Destruction is pending; suppress any further completion event */
        tsx->cb.on_complete = NULL;
    }

    pj_grp_lock_release(tsx->grp_lock);

    if (status != PJ_SUCCESS)
        return status;

    TRACE_((tsx->obj_name, "STUN transaction %p schedule destroy", tsx));

    return PJ_SUCCESS;
}
/*
 * Schedule the next automatic registration refresh.
 *
 * Only schedules when auto-registration is enabled and the server granted
 * a positive expiration. The refresh fires delay_before_refresh seconds
 * before expiry, clamped between DELAY_BEFORE_REFRESH and the configured
 * expires value. Also records last_reg/next_reg timestamps for queries.
 *
 * BUG FIX: the original text contained mojibake -- the U+00AE character
 * "(R)" appearing where "&reg..." was intended (e.g. "(R)c->timer"),
 * which does not compile. Restored to "&regc->timer",
 * "&regc_refresh_timer_cb", "&regc->last_reg".
 */
static void schedule_registration ( pjsip_regc *regc, pj_int32_t expiration )
{
    if (regc->auto_reg && expiration > 0) {
        pj_time_val delay = { 0, 0};

        /* Cancel any refresh timer already running */
        pj_timer_heap_cancel_if_active(
                                 pjsip_endpt_get_timer_heap(regc->endpt),
                                 &regc->timer, 0);

        /* Refresh a bit before the registration actually expires */
        delay.sec = expiration - regc->delay_before_refresh;
        if (regc->expires != PJSIP_REGC_EXPIRATION_NOT_SPECIFIED &&
            delay.sec > (pj_int32_t)regc->expires)
        {
            delay.sec = regc->expires;
        }
        /* Never refresh more aggressively than the minimum delay */
        if (delay.sec < DELAY_BEFORE_REFRESH)
            delay.sec = DELAY_BEFORE_REFRESH;

        regc->timer.cb = &regc_refresh_timer_cb;
        regc->timer.id = REFRESH_TIMER;
        regc->timer.user_data = regc;
        pjsip_endpt_schedule_timer( regc->endpt, &regc->timer, &delay);

        /* Record when we registered and when the next refresh is due */
        pj_gettimeofday(&regc->last_reg);
        regc->next_reg = regc->last_reg;
        regc->next_reg.sec += delay.sec;
    }
}
/*
 * Destroy an outgoing STUN tx_data, either immediately (force=PJ_TRUE)
 * or lazily by scheduling the owning client transaction's destruction
 * so that late retransmissions can still be absorbed.
 */
static void destroy_tdata(pj_stun_tx_data *tdata, pj_bool_t force)
{
    TRACE_((THIS_FILE, "tdata %p destroy request, force=%d, tsx=%p",
            tdata, force, tdata->client_tsx));

    /* If the cached-response timer was armed, cancel it and unlink the
     * tdata from the session's list it currently sits on.
     */
    if (tdata->res_timer.id != PJ_FALSE) {
        pj_timer_heap_cancel_if_active(tdata->sess->cfg->timer_heap,
                                       &tdata->res_timer, PJ_FALSE);
        pj_list_erase(tdata);
    }

    if (force) {
        /* Immediate destruction: unlink, detach + stop the transaction,
         * then release the pool.
         * NOTE(review): pj_list_erase() may run a second time here if the
         * res_timer branch above already erased this node -- presumably
         * benign with pjlib's list implementation, but verify.
         */
        pj_list_erase(tdata);
        if (tdata->client_tsx) {
            pj_stun_client_tsx_stop(tdata->client_tsx);
            pj_stun_client_tsx_set_data(tdata->client_tsx, NULL);
        }
        pj_pool_release(tdata->pool);
    } else {
        if (tdata->client_tsx) {
            /* "Probably" this is to absorb retransmission */
            pj_time_val delay = {0, 300};
            pj_stun_client_tsx_schedule_destroy(tdata->client_tsx, &delay);
        } else {
            /* No transaction attached; safe to release right away */
            pj_pool_release(tdata->pool);
        }
    }
}
/*
 * Destroy the STUN socket: stop keep-alive, close the underlying
 * socket/activesock, destroy the STUN session, then drop the group
 * lock reference that keeps this object alive.
 */
PJ_DEF(pj_status_t) pj_stun_sock_destroy(pj_stun_sock *stun_sock)
{
    TRACE_((stun_sock->obj_name, "STUN sock %p request, ref_cnt=%d",
            stun_sock, pj_grp_lock_get_ref(stun_sock->grp_lock)));

    pj_grp_lock_acquire(stun_sock->grp_lock);

    /* Guard against a second destroy request */
    if (stun_sock->is_destroying) {
        /* Destroy already called */
        pj_grp_lock_release(stun_sock->grp_lock);
        return PJ_EINVALIDOP;
    }
    stun_sock->is_destroying = PJ_TRUE;

    /* Stop the keep-alive timer */
    pj_timer_heap_cancel_if_active(stun_sock->stun_cfg.timer_heap,
                                   &stun_sock->ka_timer, 0);

    /* Close whichever socket wrapper owns the fd */
    if (stun_sock->active_sock != NULL) {
        /* activesock owns (and will close) the fd */
        stun_sock->sock_fd = PJ_INVALID_SOCKET;
        pj_activesock_close(stun_sock->active_sock);
    } else if (stun_sock->sock_fd != PJ_INVALID_SOCKET) {
        pj_sock_close(stun_sock->sock_fd);
        stun_sock->sock_fd = PJ_INVALID_SOCKET;
    }

    if (stun_sock->stun_sess)
        pj_stun_session_destroy(stun_sock->stun_sess);

    /* Drop our self-reference; actual deletion happens when it hits zero */
    pj_grp_lock_dec_ref(stun_sock->grp_lock);
    pj_grp_lock_release(stun_sock->grp_lock);
    return PJ_SUCCESS;
}
/*
 * Notify the STUN transaction about the arrival of STUN response.
 * Cancels retransmission, swallows provisional responses, maps an
 * ERROR-CODE attribute to a pj_status_t, and invokes on_complete once.
 */
PJ_DEF(pj_status_t) pj_stun_client_tsx_on_rx_msg(pj_stun_client_tsx *tsx,
                                                 const pj_stun_msg *msg,
                                                 const pj_sockaddr_t *src_addr,
                                                 unsigned src_addr_len)
{
    pj_stun_errcode_attr *err_attr;
    pj_status_t status;

    /* Must be STUN response message */
    if (!PJ_STUN_IS_SUCCESS_RESPONSE(msg->hdr.type) &&
        !PJ_STUN_IS_ERROR_RESPONSE(msg->hdr.type))
    {
        PJ_LOG(4,(tsx->obj_name,
                  "STUN rx_msg() error: not response message"));
        return PJNATH_EINSTUNMSGTYPE;
    }

    /* We have a response with matching transaction ID.
     * We can cancel retransmit timer now.
     */
    pj_timer_heap_cancel_if_active(tsx->timer_heap, &tsx->retransmit_timer,
                                   TIMER_INACTIVE);

    /* Find STUN error code attribute */
    err_attr = (pj_stun_errcode_attr*)
               pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_ERROR_CODE, 0);

    /* NOTE(review): the condition checks <= 200 while the comment below
     * cites codes 100-299; verify against the draft whether 201-299
     * should also be treated as provisional. Left unchanged here.
     */
    if (err_attr && err_attr->err_code <= 200) {
        /* draft-ietf-behave-rfc3489bis-05.txt Section 8.3.2:
         * Any response between 100 and 299 MUST result in the cessation
         * of request retransmissions, but otherwise is discarded.
         */
        PJ_LOG(4,(tsx->obj_name,
                  "STUN rx_msg() error: received provisional %d code (%.*s)",
                  err_attr->err_code,
                  (int)err_attr->reason.slen,
                  err_attr->reason.ptr));
        return PJ_SUCCESS;
    }

    /* No ERROR-CODE means success; otherwise map the STUN code */
    if (err_attr == NULL) {
        status = PJ_SUCCESS;
    } else {
        status = PJ_STATUS_FROM_STUN_CODE(err_attr->err_code);
    }

    /* Call callback (at most once per transaction) */
    if (!tsx->complete) {
        tsx->complete = PJ_TRUE;
        if (tsx->cb.on_complete) {
            tsx->cb.on_complete(tsx, status, msg, src_addr, src_addr_len);
        }
        /* We might have been destroyed, don't try to access the object */
    }

    return PJ_SUCCESS;
}
/*
 * Destroy transaction immediately.
 * Deactivates both the retransmit and the destroy timers without taking
 * the group lock (this may run from the group lock's own destructor).
 */
PJ_DEF(pj_status_t) pj_stun_client_tsx_stop(pj_stun_client_tsx *tsx)
{
    PJ_ASSERT_RETURN(tsx, PJ_EINVAL);

    /* Don't call grp_lock_acquire() because we might be called on
     * group lock's destructor.
     */
    pj_timer_heap_cancel_if_active(tsx->timer_heap,
                                   &tsx->retransmit_timer,
                                   TIMER_INACTIVE);
    pj_timer_heap_cancel_if_active(tsx->timer_heap,
                                   &tsx->destroy_timer,
                                   TIMER_INACTIVE);

    PJ_LOG(5,(tsx->obj_name,
              "STUN client transaction %p stopped, ref_cnt=%d",
              tsx, pj_grp_lock_get_ref(tsx->grp_lock)));
    return PJ_SUCCESS;
}
/*
 * Request to retransmit the request.
 * Ignored once destruction has been scheduled; otherwise cancels the
 * pending retransmission timer and transmits the request right away.
 */
PJ_DEF(pj_status_t) pj_stun_client_tsx_retransmit(pj_stun_client_tsx *tsx,
                                                  pj_bool_t mod_count)
{
    /* Destruction already scheduled -- nothing more to send */
    if (tsx->destroy_timer.id != 0)
        return PJ_SUCCESS;

    /* Disarm the pending retransmission before sending now */
    pj_timer_heap_cancel_if_active(tsx->timer_heap,
                                   &tsx->retransmit_timer,
                                   TIMER_INACTIVE);

    return tsx_transmit_msg(tsx, mod_count);
}
/* Schedule keep-alive timer. Re-arms the timer after cancelling any
 * pending instance; skipped when keep-alive is disabled (interval <= 0)
 * or the socket is being destroyed.
 */
static void start_ka_timer(pj_stun_sock *stun_sock)
{
    pj_time_val ka_delay;

    pj_timer_heap_cancel_if_active(stun_sock->stun_cfg.timer_heap,
                                   &stun_sock->ka_timer, 0);

    pj_assert(stun_sock->ka_interval != 0);

    /* Nothing to schedule if keep-alive is off or we are shutting down */
    if (stun_sock->ka_interval <= 0 || stun_sock->is_destroying)
        return;

    ka_delay.sec = stun_sock->ka_interval;
    ka_delay.msec = 0;

    pj_timer_heap_schedule_w_grp_lock(stun_sock->stun_cfg.timer_heap,
                                      &stun_sock->ka_timer,
                                      &ka_delay, PJ_TRUE,
                                      stun_sock->grp_lock);
}
/*
 * Destroy the STUN session: stop all pending client transactions and
 * cached-response timers (each holds a group lock reference while
 * active), then drop the session's own reference.
 */
PJ_DEF(pj_status_t) pj_stun_session_destroy(pj_stun_session *sess)
{
    pj_stun_tx_data *it;

    PJ_ASSERT_RETURN(sess, PJ_EINVAL);

    TRACE_((SNAME(sess), "STUN session %p destroy request, ref_cnt=%d",
            sess, pj_grp_lock_get_ref(sess->grp_lock)));

    pj_grp_lock_acquire(sess->grp_lock);

    if (sess->is_destroying) {
        /* Prevent from decrementing the ref counter more than once */
        pj_grp_lock_release(sess->grp_lock);
        return PJ_EINVALIDOP;
    }
    sess->is_destroying = PJ_TRUE;

    /* We need to stop transactions and cached response because they are
     * holding the group lock's reference counter while retransmitting.
     */
    for (it = sess->pending_request_list.next;
         it != &sess->pending_request_list;
         it = it->next)
    {
        if (it->client_tsx)
            pj_stun_client_tsx_stop(it->client_tsx);
    }

    for (it = sess->cached_response_list.next;
         it != &sess->cached_response_list;
         it = it->next)
    {
        pj_timer_heap_cancel_if_active(it->sess->cfg->timer_heap,
                                       &it->res_timer, PJ_FALSE);
    }

    pj_grp_lock_dec_ref(sess->grp_lock);
    pj_grp_lock_release(sess->grp_lock);

    return PJ_SUCCESS;
}
/*
 * Send outgoing message and start STUN transaction.
 * When retransmit is false (reliable transport, e.g. TCP), a single
 * timeout timer is simulated via the retransmit timer; otherwise the
 * normal retransmission schedule is driven by tsx_transmit_msg().
 */
PJ_DEF(pj_status_t) pj_stun_client_tsx_send_msg(pj_stun_client_tsx *tsx,
                                                pj_bool_t retransmit,
                                                void *pkt,
                                                unsigned pkt_len)
{
    pj_status_t status;

    PJ_ASSERT_RETURN(tsx && pkt && pkt_len, PJ_EINVAL);
    /* A transaction can only carry one outstanding request */
    PJ_ASSERT_RETURN(tsx->retransmit_timer.id == 0, PJ_EBUSY);

    pj_grp_lock_acquire(tsx->grp_lock);

    /* Encode message */
    tsx->last_pkt = pkt;
    tsx->last_pkt_size = pkt_len;

    /* Update STUN retransmit flag */
    tsx->require_retransmit = retransmit;

    /* For TCP, schedule timeout timer after PJ_STUN_TIMEOUT_VALUE.
     * Since we don't have timeout timer, simulate this by using
     * retransmit timer.
     */
    if (!retransmit) {
        unsigned timeout;

        pj_assert(tsx->retransmit_timer.id == 0);
        tsx->transmit_count = PJ_STUN_MAX_TRANSMIT_COUNT;

        /* Single overall timeout: 16 * RTO milliseconds */
        timeout = tsx->rto_msec * 16;
        tsx->retransmit_time.sec = timeout / 1000;
        tsx->retransmit_time.msec = timeout % 1000;

        /* Schedule timer first because when send_msg() failed we can
         * cancel it (as opposed to when schedule_timer() failed we cannot
         * cancel transmission).
         */;
        status = pj_timer_heap_schedule_w_grp_lock(tsx->timer_heap,
                                                   &tsx->retransmit_timer,
                                                   &tsx->retransmit_time,
                                                   TIMER_ACTIVE,
                                                   tsx->grp_lock);
        if (status != PJ_SUCCESS) {
            tsx->retransmit_timer.id = TIMER_INACTIVE;
            pj_grp_lock_release(tsx->grp_lock);
            return status;
        }
    }

    /* Send the message */
    status = tsx_transmit_msg(tsx, PJ_TRUE);
    if (status != PJ_SUCCESS) {
        /* Undo the timer scheduled above (or by tsx_transmit_msg) */
        pj_timer_heap_cancel_if_active(tsx->timer_heap,
                                       &tsx->retransmit_timer,
                                       TIMER_INACTIVE);
        pj_grp_lock_release(tsx->grp_lock);
        return status;
    }

    pj_grp_lock_release(tsx->grp_lock);
    return PJ_SUCCESS;
}
/*
 * Transmit message.
 * When retransmission is required and mod_count is set, this first
 * (re)arms the retransmit timer with an exponentially growing delay
 * (RTO, then doubling, capped by PJ_STUN_TIMEOUT_VALUE for the final
 * attempt), then hands the cached packet to the on_send_msg callback.
 */
static pj_status_t tsx_transmit_msg(pj_stun_client_tsx *tsx,
                                    pj_bool_t mod_count)
{
    pj_status_t status;

    PJ_ASSERT_RETURN(tsx->retransmit_timer.id == TIMER_INACTIVE ||
                     !tsx->require_retransmit, PJ_EBUSY);

    if (tsx->require_retransmit && mod_count) {
        /* Calculate retransmit/timeout delay */
        if (tsx->transmit_count == 0) {
            /* First transmission: wait one RTO */
            tsx->retransmit_time.sec = 0;
            tsx->retransmit_time.msec = tsx->rto_msec;
        } else if (tsx->transmit_count < PJ_STUN_MAX_TRANSMIT_COUNT-1) {
            /* Subsequent retransmissions: double the previous delay */
            unsigned msec;

            msec = PJ_TIME_VAL_MSEC(tsx->retransmit_time);
            msec <<= 1;
            tsx->retransmit_time.sec = msec / 1000;
            tsx->retransmit_time.msec = msec % 1000;
        } else {
            /* Last attempt: fixed final timeout */
            tsx->retransmit_time.sec = PJ_STUN_TIMEOUT_VALUE / 1000;
            tsx->retransmit_time.msec = PJ_STUN_TIMEOUT_VALUE % 1000;
        }

        /* Schedule timer first because when send_msg() failed we can
         * cancel it (as opposed to when schedule_timer() failed we cannot
         * cancel transmission).
         */;
        status = pj_timer_heap_schedule_w_grp_lock(tsx->timer_heap,
                                                   &tsx->retransmit_timer,
                                                   &tsx->retransmit_time,
                                                   TIMER_ACTIVE,
                                                   tsx->grp_lock);
        if (status != PJ_SUCCESS) {
            tsx->retransmit_timer.id = TIMER_INACTIVE;
            return status;
        }
    }

    if (mod_count)
        tsx->transmit_count++;

    PJ_LOG(5,(tsx->obj_name, "STUN sending message (transmit count=%d)",
              tsx->transmit_count));
    pj_log_push_indent();

    /* Send message */
    status = tsx->cb.on_send_msg(tsx, tsx->last_pkt, tsx->last_pkt_size);

    if (status == PJNATH_ESTUNDESTROYED) {
        /* We've been destroyed, don't access the object. */
    } else if (status != PJ_SUCCESS) {
        if (mod_count) {
            /* Send failed: disarm the timer we armed above */
            pj_timer_heap_cancel_if_active( tsx->timer_heap,
                                            &tsx->retransmit_timer,
                                            TIMER_INACTIVE);
        }
        PJ_PERROR(4, (tsx->obj_name, status,
                      "STUN error sending message"));
    }

    pj_log_pop_indent();
    return status;
}
/*
 * Callback from TURN session when state has changed.
 * On RESOLVED: (re)creates the socket toward the TURN server (close old
 * activesock, create+bind+tune a new socket, wrap it in an activesock,
 * and start the connect). On DESTROYING or later: detaches from the
 * session and schedules deferred self-destruction via a 0-delay timer.
 * Note the many early returns via pj_turn_sock_destroy() on error paths.
 */
static void turn_on_state(pj_turn_session *sess, pj_turn_state_t old_state,
                          pj_turn_state_t new_state)
{
    pj_turn_sock *turn_sock = (pj_turn_sock*)
                              pj_turn_session_get_user_data(sess);
    pj_status_t status;

    if (turn_sock == NULL) {
        /* We've been destroyed */
        return;
    }

    /* Notify app first */
    if (turn_sock->cb.on_state) {
        (*turn_sock->cb.on_state)(turn_sock, old_state, new_state);
    }

    /* Make sure user hasn't destroyed us in the callback */
    if (turn_sock->sess && new_state == PJ_TURN_STATE_RESOLVED) {
        pj_turn_session_info info;
        pj_turn_session_get_info(turn_sock->sess, &info);
        new_state = info.state;
    }

    if (turn_sock->sess && new_state == PJ_TURN_STATE_RESOLVED) {
        /*
         * Once server has been resolved, initiate outgoing TCP
         * connection to the server.
         */
        pj_turn_session_info info;
        char addrtxt[PJ_INET6_ADDRSTRLEN+8];
        int sock_type;
        pj_sock_t sock;
        pj_activesock_cfg asock_cfg;
        pj_activesock_cb asock_cb;
        pj_sockaddr bound_addr, *cfg_bind_addr;
        pj_uint16_t max_bind_retry;

        /* Close existing connection, if any. This happens when
         * we're switching to alternate TURN server when either TCP
         * connection or ALLOCATE request failed.
         */
        if (turn_sock->active_sock) {
            pj_activesock_close(turn_sock->active_sock);
            turn_sock->active_sock = NULL;
        }

        /* Get server address from session info */
        pj_turn_session_get_info(sess, &info);

        /* Datagram socket for UDP transport, stream socket otherwise */
        if (turn_sock->conn_type == PJ_TURN_TP_UDP)
            sock_type = pj_SOCK_DGRAM();
        else
            sock_type = pj_SOCK_STREAM();

        /* Init socket */
        status = pj_sock_socket(turn_sock->af, sock_type, 0, &sock);
        if (status != PJ_SUCCESS) {
            pj_turn_sock_destroy(turn_sock);
            return;
        }

        /* Bind socket, honoring the configured bound address and port
         * range (retry count is capped by the port range when set).
         */
        cfg_bind_addr = &turn_sock->setting.bound_addr;
        max_bind_retry = MAX_BIND_RETRY;
        if (turn_sock->setting.port_range &&
            turn_sock->setting.port_range < max_bind_retry)
        {
            max_bind_retry = turn_sock->setting.port_range;
        }
        pj_sockaddr_init(turn_sock->af, &bound_addr, NULL, 0);
        if (cfg_bind_addr->addr.sa_family == pj_AF_INET() ||
            cfg_bind_addr->addr.sa_family == pj_AF_INET6())
        {
            pj_sockaddr_cp(&bound_addr, cfg_bind_addr);
        }
        status = pj_sock_bind_random(sock, &bound_addr,
                                     turn_sock->setting.port_range,
                                     max_bind_retry);
        if (status != PJ_SUCCESS) {
            pj_turn_sock_destroy(turn_sock);
            return;
        }

        /* Apply QoS, if specified */
        status = pj_sock_apply_qos2(sock, turn_sock->setting.qos_type,
                                    &turn_sock->setting.qos_params,
                                    (turn_sock->setting.qos_ignore_error?2:1),
                                    turn_sock->pool->obj_name, NULL);
        if (status != PJ_SUCCESS && !turn_sock->setting.qos_ignore_error) {
            pj_turn_sock_destroy(turn_sock);
            return;
        }

        /* Apply socket buffer size (failures are logged, not fatal) */
        if (turn_sock->setting.so_rcvbuf_size > 0) {
            unsigned sobuf_size = turn_sock->setting.so_rcvbuf_size;
            status = pj_sock_setsockopt_sobuf(sock, pj_SO_RCVBUF(),
                                              PJ_TRUE, &sobuf_size);
            if (status != PJ_SUCCESS) {
                pj_perror(3, turn_sock->obj_name, status,
                          "Failed setting SO_RCVBUF");
            } else {
                if (sobuf_size < turn_sock->setting.so_rcvbuf_size) {
                    PJ_LOG(4, (turn_sock->obj_name,
                               "Warning! Cannot set SO_RCVBUF as configured,"
                               " now=%d, configured=%d",
                               sobuf_size,
                               turn_sock->setting.so_rcvbuf_size));
                } else {
                    PJ_LOG(5, (turn_sock->obj_name, "SO_RCVBUF set to %d",
                               sobuf_size));
                }
            }
        }
        if (turn_sock->setting.so_sndbuf_size > 0) {
            unsigned sobuf_size = turn_sock->setting.so_sndbuf_size;
            status = pj_sock_setsockopt_sobuf(sock, pj_SO_SNDBUF(),
                                              PJ_TRUE, &sobuf_size);
            if (status != PJ_SUCCESS) {
                pj_perror(3, turn_sock->obj_name, status,
                          "Failed setting SO_SNDBUF");
            } else {
                if (sobuf_size < turn_sock->setting.so_sndbuf_size) {
                    PJ_LOG(4, (turn_sock->obj_name,
                               "Warning! Cannot set SO_SNDBUF as configured,"
                               " now=%d, configured=%d",
                               sobuf_size,
                               turn_sock->setting.so_sndbuf_size));
                } else {
                    PJ_LOG(5, (turn_sock->obj_name, "SO_SNDBUF set to %d",
                               sobuf_size));
                }
            }
        }

        /* Create active socket sharing our group lock */
        pj_activesock_cfg_default(&asock_cfg);
        asock_cfg.grp_lock = turn_sock->grp_lock;

        pj_bzero(&asock_cb, sizeof(asock_cb));
        asock_cb.on_data_read = &on_data_read;
        asock_cb.on_connect_complete = &on_connect_complete;
        status = pj_activesock_create(turn_sock->pool, sock,
                                      sock_type, &asock_cfg,
                                      turn_sock->cfg.ioqueue, &asock_cb,
                                      turn_sock,
                                      &turn_sock->active_sock);
        if (status != PJ_SUCCESS) {
            pj_turn_sock_destroy(turn_sock);
            return;
        }

        PJ_LOG(5,(turn_sock->pool->obj_name,
                  "Connecting to %s",
                  pj_sockaddr_print(&info.server, addrtxt,
                                    sizeof(addrtxt), 3)));

        /* Initiate non-blocking connect */
#if PJ_HAS_TCP
        status=pj_activesock_start_connect(turn_sock->active_sock,
                                           turn_sock->pool,
                                           &info.server,
                                           pj_sockaddr_get_len(&info.server));
        if (status == PJ_SUCCESS) {
            /* Connected immediately */
            on_connect_complete(turn_sock->active_sock, PJ_SUCCESS);
        } else if (status != PJ_EPENDING) {
            pj_turn_sock_destroy(turn_sock);
            return;
        }
#else
        /* No TCP support: treat as connected right away */
        on_connect_complete(turn_sock->active_sock, PJ_SUCCESS);
#endif

        /* Done for now. Subsequent work will be done in
         * on_connect_complete() callback.
         */
    }

    if (new_state >= PJ_TURN_STATE_DESTROYING && turn_sock->sess) {
        pj_time_val delay = {0, 0};

        /* Detach from the session and schedule deferred destruction */
        turn_sock->sess = NULL;
        pj_turn_session_set_user_data(sess, NULL);

        pj_timer_heap_cancel_if_active(turn_sock->cfg.timer_heap,
                                       &turn_sock->timer, 0);
        pj_timer_heap_schedule_w_grp_lock(turn_sock->cfg.timer_heap,
                                          &turn_sock->timer,
                                          &delay, TIMER_DESTROY,
                                          turn_sock->grp_lock);
    }
}