Example #1
PJ_DEF(pj_status_t) pj_stun_client_tsx_schedule_destroy(
				    pj_stun_client_tsx *tsx,
				    const pj_time_val *delay)
{
    pj_status_t status;

    PJ_ASSERT_RETURN(tsx && delay, PJ_EINVAL);
    PJ_ASSERT_RETURN(tsx->cb.on_destroy, PJ_EINVAL);

    /* Cancel previously registered timer */
    if (tsx->destroy_timer.id != 0) {
	pj_timer_heap_cancel(tsx->timer_heap, &tsx->destroy_timer);
	tsx->destroy_timer.id = 0;
    }

    /* Stop retransmission, just in case */
    if (tsx->retransmit_timer.id != 0) {
	pj_timer_heap_cancel(tsx->timer_heap, &tsx->retransmit_timer);
	tsx->retransmit_timer.id = 0;
    }

    status = pj_timer_heap_schedule(tsx->timer_heap,
				    &tsx->destroy_timer, delay);
    if (status != PJ_SUCCESS)
	return status;

    tsx->destroy_timer.id = TIMER_ACTIVE;
    tsx->cb.on_complete = NULL;

    return PJ_SUCCESS;
}
Example #2
/*
 * Destroy transaction immediately.
 */
PJ_DEF(pj_status_t) pj_stun_client_tsx_destroy(pj_stun_client_tsx *tsx)
{
    PJ_ASSERT_RETURN(tsx, PJ_EINVAL);

    if (tsx->retransmit_timer.id != 0) {
	pj_timer_heap_cancel(tsx->timer_heap, &tsx->retransmit_timer);
	tsx->retransmit_timer.id = 0;
    }
    if (tsx->destroy_timer.id != 0) {
	pj_timer_heap_cancel(tsx->timer_heap, &tsx->destroy_timer);
	tsx->destroy_timer.id = 0;
    }

    PJ_LOG(5,(tsx->obj_name, "STUN client transaction destroyed"));
    return PJ_SUCCESS;
}
Example #3
static int timer_cancel_entry(pj_timer_entry* entry)
{
    if (timer_initialized && timer != NULL)
        return pj_timer_heap_cancel(timer, entry);
    else
        return PJ_EIGNORED;
}
Example #4
static void destroy_tdata(pj_stun_tx_data *tdata, pj_bool_t force)
{
    if (tdata->res_timer.id != PJ_FALSE) {
	pj_timer_heap_cancel(tdata->sess->cfg->timer_heap, 
			     &tdata->res_timer);
	tdata->res_timer.id = PJ_FALSE;
	pj_list_erase(tdata);
    }

    if (force) {
	if (tdata->client_tsx) {
	    tsx_erase(tdata->sess, tdata);
	    pj_stun_client_tsx_destroy(tdata->client_tsx);
	}
	pj_pool_release(tdata->pool);

    } else {
	if (tdata->client_tsx) {
	    pj_time_val delay = {2, 0};
	    pj_stun_client_tsx_schedule_destroy(tdata->client_tsx, &delay);

	} else {
	    pj_pool_release(tdata->pool);
	}
    }
}
Example #5
/* Initiate shutdown sequence for this allocation and start destroy timer.
 * Once allocation is marked as shutting down, any packets will be
 * rejected/discarded
 */
static void alloc_shutdown(pj_turn_allocation *alloc)
{
    pj_time_val destroy_delay = DESTROY_DELAY;

    /* Work with existing schedule */
    if (alloc->relay.timer.id == TIMER_ID_TIMEOUT) {
	/* Cancel existing shutdown timer */
	pj_timer_heap_cancel(alloc->server->core.timer_heap,
			     &alloc->relay.timer);
	alloc->relay.timer.id = TIMER_ID_NONE;

    } else if (alloc->relay.timer.id == TIMER_ID_DESTROY) {
	/* We've been scheduled to be destroyed, ignore this
	 * shutdown request.
	 */
	return;
    }

    pj_assert(alloc->relay.timer.id == TIMER_ID_NONE);

    /* Shutdown relay socket */
    destroy_relay(&alloc->relay);

    /* Don't unregister from hash table because we still need to
     * handle REFRESH retransmission.
     */

    /* Schedule destroy timer */
    alloc->relay.timer.id = TIMER_ID_DESTROY;
    pj_timer_heap_schedule(alloc->server->core.timer_heap,
			   &alloc->relay.timer, &destroy_delay);
}
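The two timer ids used above (TIMER_ID_TIMEOUT and TIMER_ID_DESTROY) imply a dispatching callback that reuses the same pj_timer_entry for both purposes. A minimal sketch of that side, assuming the entry's user_data points back to the allocation; the callback name and body are illustrative, not the server's actual code:

/* Illustrative sketch only: dispatch on the timer id that alloc_shutdown()
 * and resched_timeout() set on the shared relay timer entry. */
static void on_relay_timer(pj_timer_heap_t *th, pj_timer_entry *e)
{
    pj_turn_allocation *alloc = (pj_turn_allocation*) e->user_data;

    PJ_UNUSED_ARG(th);

    if (e->id == TIMER_ID_TIMEOUT) {
	/* Lifetime expired: start the shutdown sequence, which will
	 * reschedule this same entry with TIMER_ID_DESTROY. */
	e->id = TIMER_ID_NONE;
	alloc_shutdown(alloc);

    } else if (e->id == TIMER_ID_DESTROY) {
	/* Deferred destroy delay elapsed; tear the allocation down. */
	e->id = TIMER_ID_NONE;
	/* ... free the allocation here ... */
    }
}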
Example #6
static void end_session(nat_detect_session *sess,
			pj_status_t status,
			pj_stun_nat_type nat_type)
{
    pj_stun_nat_detect_result result;
    char errmsg[PJ_ERR_MSG_SIZE];
    pj_time_val delay;

    if (sess->timer.id != 0) {
	pj_timer_heap_cancel(sess->timer_heap, &sess->timer);
	sess->timer.id = 0;
    }

    pj_bzero(&result, sizeof(result));
    errmsg[0] = '\0';
    result.status_text = errmsg;

    result.status = status;
    pj_strerror(status, errmsg, sizeof(errmsg));
    result.nat_type = nat_type;
    result.nat_type_name = nat_type_names[result.nat_type];

    if (sess->cb)
	(*sess->cb)(sess->user_data, &result);

    delay.sec = 0;
    delay.msec = 0;

    sess->timer.id = TIMER_DESTROY;
    pj_timer_heap_schedule(sess->timer_heap, &sess->timer, &delay);
}
Example #7
/* Reschedule timeout using current lifetime setting */
static pj_status_t resched_timeout(pj_turn_allocation *alloc)
{
    pj_time_val delay;
    pj_status_t status;

    pj_gettimeofday(&alloc->relay.expiry);
    alloc->relay.expiry.sec += alloc->relay.lifetime;

    pj_assert(alloc->relay.timer.id != TIMER_ID_DESTROY);
    if (alloc->relay.timer.id != 0) {
	pj_timer_heap_cancel(alloc->server->core.timer_heap,
			     &alloc->relay.timer);
	alloc->relay.timer.id = TIMER_ID_NONE;
    }

    delay.sec = alloc->relay.lifetime;
    delay.msec = 0;

    alloc->relay.timer.id = TIMER_ID_TIMEOUT;
    status = pj_timer_heap_schedule(alloc->server->core.timer_heap,
				    &alloc->relay.timer, &delay);
    if (status != PJ_SUCCESS) {
	alloc->relay.timer.id = TIMER_ID_NONE;
	return status;
    }

    return PJ_SUCCESS;
}
Example #8
PJ_DEF(void) pj_stun_tsx_cancel_timer(pj_stun_client_tsx *tsx)
{
    if (tsx->retransmit_timer.id) {
        pj_timer_heap_cancel(tsx->timer_heap, &tsx->retransmit_timer);
        tsx->retransmit_timer.id = 0;
    }
}
Example #9
/*
 * Send outgoing message and start STUN transaction.
 */
PJ_DEF(pj_status_t) pj_stun_client_tsx_send_msg(pj_stun_client_tsx *tsx,
						pj_bool_t retransmit,
						void *pkt,
						unsigned pkt_len)
{
    pj_status_t status;

    PJ_ASSERT_RETURN(tsx && pkt && pkt_len, PJ_EINVAL);
    PJ_ASSERT_RETURN(tsx->retransmit_timer.id == 0, PJ_EBUSY);

    /* Encode message */
    tsx->last_pkt = pkt;
    tsx->last_pkt_size = pkt_len;

    /* Update STUN retransmit flag */
    tsx->require_retransmit = retransmit;

    /* For TCP, schedule timeout timer after PJ_STUN_TIMEOUT_VALUE.
     * Since we don't have timeout timer, simulate this by using
     * retransmit timer.
     */
    if (!retransmit) {
	unsigned timeout;

	pj_assert(tsx->retransmit_timer.id == 0);
	tsx->transmit_count = PJ_STUN_MAX_TRANSMIT_COUNT;

	timeout = tsx->rto_msec * 16;
	tsx->retransmit_time.sec = timeout / 1000;
	tsx->retransmit_time.msec = timeout % 1000;

	/* Schedule timer first because when send_msg() failed we can
	 * cancel it (as opposed to when schedule_timer() failed we cannot
	 * cancel transmission).
	 */;
	status = pj_timer_heap_schedule(tsx->timer_heap, 
					&tsx->retransmit_timer,
					&tsx->retransmit_time);
	if (status != PJ_SUCCESS) {
	    tsx->retransmit_timer.id = 0;
	    return status;
	}
	tsx->retransmit_timer.id = TIMER_ACTIVE;
    }

    /* Send the message */
    status = tsx_transmit_msg(tsx, PJ_TRUE);
    if (status != PJ_SUCCESS) {
	if (tsx->retransmit_timer.id != 0) {
	    pj_timer_heap_cancel(tsx->timer_heap, 
				 &tsx->retransmit_timer);
	    tsx->retransmit_timer.id = 0;
	}
	return status;
    }

    return PJ_SUCCESS;
}
Example #10
PJ_DEF(int) pj_timer_heap_cancel_if_active(pj_timer_heap_t *ht,
                                           pj_timer_entry *entry,
                                           int id_val)
{
    int count = pj_timer_heap_cancel(ht, entry);
    if (count == 1)
    	entry->id = id_val;
    
    return count;
}
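Callers of the helper above typically cancel-and-retag the entry before re-arming it, so a callback that fires late can recognize the stale id. A minimal sketch of that pattern, assuming pjlib is available; my_session, TIMER_NONE and TIMER_KEEP_ALIVE are made-up names for illustration:

#include <pjlib.h>

enum { TIMER_NONE = 0, TIMER_KEEP_ALIVE = 1 };   /* hypothetical ids */

struct my_session
{
    pj_timer_heap_t *timer_heap;
    pj_timer_entry   timer;
};

static void restart_keep_alive(struct my_session *sess, unsigned interval_sec)
{
    pj_time_val delay;

    /* Cancel whatever is pending; the helper resets the id for us so a
     * late-firing callback can see that it is stale. */
    pj_timer_heap_cancel_if_active(sess->timer_heap, &sess->timer, TIMER_NONE);

    delay.sec = interval_sec;
    delay.msec = 0;

    sess->timer.id = TIMER_KEEP_ALIVE;
    if (pj_timer_heap_schedule(sess->timer_heap, &sess->timer, &delay) != PJ_SUCCESS)
	sess->timer.id = TIMER_NONE;
}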
Example #11
/*
 * Request to retransmit the request.
 */
PJ_DEF(pj_status_t) pj_stun_client_tsx_retransmit(pj_stun_client_tsx *tsx)
{
    if (tsx->destroy_timer.id != 0) {
	return PJ_SUCCESS;
    }

    if (tsx->retransmit_timer.id != 0) {
	pj_timer_heap_cancel(tsx->timer_heap, &tsx->retransmit_timer);
	tsx->retransmit_timer.id = 0;
    }

    return tsx_transmit_msg(tsx);
}
Example #12
static int32_t zrtp_cancelTimer(ZrtpContext* ctx)
{
    struct tp_zrtp *zrtp = (struct tp_zrtp*)ctx->userData;

#ifndef DYNAMIC_TIMER
    timer_cancel_entry(&zrtp->timeoutEntry);
#else
    if(zrtp->timer_heap != NULL){
    	pj_timer_heap_cancel(zrtp->timer_heap, &zrtp->timeoutEntry);
    }
#endif

    return 1;
}
Example #13
/* Destroy relay resource */
static void destroy_relay(pj_turn_relay_res *relay)
{
    if (relay->timer.id) {
	pj_timer_heap_cancel(relay->allocation->server->core.timer_heap,
			     &relay->timer);
	relay->timer.id = PJ_FALSE;
    }

    if (relay->tp.key) {
	pj_ioqueue_unregister(relay->tp.key);
	relay->tp.key = NULL;
	relay->tp.sock = PJ_INVALID_SOCKET;
    } else if (relay->tp.sock != PJ_INVALID_SOCKET) {
	pj_sock_close(relay->tp.sock);
	relay->tp.sock = PJ_INVALID_SOCKET;
    }

    /* Mark as shutdown */
    relay->lifetime = 0;
}
Example #14
/* Destroy */
static void do_destroy(pj_tcp_session *sess)
{
    /* Lock session */
    if (sess->lock) {
        pj_lock_acquire(sess->lock);
    }

    /* Cancel pending timer, if any */
    if (sess->timer.id != TIMER_NONE) {
        pj_timer_heap_cancel(sess->timer_heap, &sess->timer);
        sess->timer.id = TIMER_NONE;
    }

    /* Destroy STUN session */
    if (sess->stun) {
        pj_stun_session_destroy(sess->stun);
        sess->stun = NULL;
    }

    /* Destroy lock */
    if (sess->lock) {
        pj_lock_release(sess->lock);
        pj_lock_destroy(sess->lock);
        sess->lock = NULL;
    }
#if 0
    if (sess->asock) {
        pj_activesock_close(sess->asock);
        sess->asock = NULL;
    }
#endif
    /* Destroy pool */
    if (sess->pool) {
        pj_pool_t *pool = sess->pool;

        PJ_LOG(4,(sess->obj_name, "TCP client session destroyed"));

        sess->pool = NULL;
        pj_pool_release(pool);
    }
}
Example #15
/* Destroy */
PJ_DEF(pj_status_t) pj_stun_sock_destroy(pj_stun_sock *stun_sock)
{
    if (stun_sock->q) {
	pj_dns_srv_cancel_query(stun_sock->q, PJ_FALSE);
	stun_sock->q = NULL;
    }

    /* Destroy the active socket first just in case we'll get
     * stray callback.
     */
    if (stun_sock->active_sock != NULL) {
	pj_activesock_close(stun_sock->active_sock);
	stun_sock->active_sock = NULL;
	stun_sock->sock_fd = PJ_INVALID_SOCKET;
    } else if (stun_sock->sock_fd != PJ_INVALID_SOCKET) {
	pj_sock_close(stun_sock->sock_fd);
	stun_sock->sock_fd = PJ_INVALID_SOCKET;
    }

    if (stun_sock->ka_timer.id != 0) {
	pj_timer_heap_cancel(stun_sock->stun_cfg.timer_heap, 
			     &stun_sock->ka_timer);
	stun_sock->ka_timer.id = 0;
    }

    if (stun_sock->stun_sess) {
	pj_stun_session_destroy(stun_sock->stun_sess);
	stun_sock->stun_sess = NULL;
    }

    if (stun_sock->pool) {
	pj_pool_t *pool = stun_sock->pool;
	stun_sock->pool = NULL;
	pj_pool_release(pool);
    }

    return PJ_SUCCESS;
}
Example #16
/* Schedule keep-alive timer */
static void start_ka_timer(pj_stun_sock *stun_sock)
{
    if (stun_sock->ka_timer.id != 0) {
	pj_timer_heap_cancel(stun_sock->stun_cfg.timer_heap, 
			     &stun_sock->ka_timer);
	stun_sock->ka_timer.id = 0;
    }

    pj_assert(stun_sock->ka_interval != 0);
    if (stun_sock->ka_interval > 0) {
	pj_time_val delay;

	delay.sec = stun_sock->ka_interval;
	delay.msec = 0;

	if (pj_timer_heap_schedule(stun_sock->stun_cfg.timer_heap, 
				   &stun_sock->ka_timer, 
				   &delay) == PJ_SUCCESS)
	{
	    stun_sock->ka_timer.id = PJ_TRUE;
	}
    }
}
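A timer armed this way is normally paired with a callback that clears the id before doing its work and then re-arms itself. A sketch of that counterpart, assuming the entry's user_data was initialized to point at the pj_stun_sock; the callback name and body are illustrative, not pjnath's actual code:

/* Illustrative counterpart to start_ka_timer(); not the library's code. */
static void on_ka_timer(pj_timer_heap_t *th, pj_timer_entry *te)
{
    pj_stun_sock *stun_sock = (pj_stun_sock*) te->user_data;

    PJ_UNUSED_ARG(th);

    /* Mark the timer as idle so start_ka_timer() will not try to cancel it. */
    te->id = PJ_FALSE;

    /* ... send the keep-alive request here ... */

    /* Re-arm for the next interval. */
    start_ka_timer(stun_sock);
}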
Example #17
/*
 * Notify the STUN transaction about the arrival of STUN response.
 */
PJ_DEF(pj_status_t) pj_stun_client_tsx_on_rx_msg(pj_stun_client_tsx *tsx,
						 const pj_stun_msg *msg,
						 const pj_sockaddr_t *src_addr,
						 unsigned src_addr_len)
{
    pj_stun_errcode_attr *err_attr;
    pj_status_t status;

    /* Must be STUN response message */
    if (!PJ_STUN_IS_SUCCESS_RESPONSE(msg->hdr.type) && 
	!PJ_STUN_IS_ERROR_RESPONSE(msg->hdr.type))
    {
	PJ_LOG(4,(tsx->obj_name, 
		  "STUN rx_msg() error: not response message"));
	return PJNATH_EINSTUNMSGTYPE;
    }


    /* We have a response with matching transaction ID. 
     * We can cancel retransmit timer now.
     */
    if (tsx->retransmit_timer.id) {
	pj_timer_heap_cancel(tsx->timer_heap, &tsx->retransmit_timer);
	tsx->retransmit_timer.id = 0;
    }

    /* Find STUN error code attribute */
    err_attr = (pj_stun_errcode_attr*) 
		pj_stun_msg_find_attr(msg, PJ_STUN_ATTR_ERROR_CODE, 0);

    if (err_attr && err_attr->err_code <= 200) {
	/* draft-ietf-behave-rfc3489bis-05.txt Section 8.3.2:
	 * Any response between 100 and 299 MUST result in the cessation
	 * of request retransmissions, but otherwise is discarded.
	 */
	PJ_LOG(4,(tsx->obj_name, 
		  "STUN rx_msg() error: received provisional %d code (%.*s)",
		  err_attr->err_code,
		  (int)err_attr->reason.slen,
		  err_attr->reason.ptr));
	return PJ_SUCCESS;
    }

    if (err_attr == NULL) {
	status = PJ_SUCCESS;
    } else {
	status = PJ_STATUS_FROM_STUN_CODE(err_attr->err_code);
    }

    /* Call callback */
    if (!tsx->complete) {
	tsx->complete = PJ_TRUE;
	if (tsx->cb.on_complete) {
	    tsx->cb.on_complete(tsx, status, msg, src_addr, src_addr_len);
	}
	/* We might have been destroyed, don't try to access the object */
    }

    return PJ_SUCCESS;

}
Example #18
 //
 // Cancel timer.
 //
 bool cancel_timer(Pj_Event_Handler *handler)
 {
     return pj_timer_heap_cancel(th_, &handler->timer_) == 1;
 }
Example #19
    //
    // Cancel a timer.
    //
    bool cancel(Pj_Timer_Entry *ent)
    {
	return pj_timer_heap_cancel(ht_, &ent->entry_) == 1;
    }
Example #20
static int test_timer_heap(void)
{
    int i, j;
    pj_timer_entry *entry;
    pj_pool_t *pool;
    pj_timer_heap_t *timer;
    pj_time_val delay;
    pj_status_t rc;
    int err = 0;
    unsigned size, count;

    size = pj_timer_heap_mem_size(MAX_COUNT)+MAX_COUNT*sizeof(pj_timer_entry);
    pool = pj_pool_create( mem, NULL, size, 4000, NULL);
    if (!pool) {
	PJ_LOG(3,("test", "...error: unable to create pool of %u bytes",
		  size));
	return -10;
    }

    entry = (pj_timer_entry*)pj_pool_calloc(pool, MAX_COUNT, sizeof(*entry));
    if (!entry)
	return -20;

    for (i=0; i<MAX_COUNT; ++i) {
	entry[i].cb = &timer_callback;
    }
    rc = pj_timer_heap_create(pool, MAX_COUNT, &timer);
    if (rc != PJ_SUCCESS) {
        app_perror("...error: unable to create timer heap", rc);
	return -30;
    }

    count = MIN_COUNT;
    for (i=0; i<LOOP; ++i) {
	int early = 0;
	int done=0;
	int cancelled=0;
	int rc;
	pj_timestamp t1, t2, t_sched, t_cancel, t_poll;
	pj_time_val now, expire;

	pj_gettimeofday(&now);
	pj_srand(now.sec);
	t_sched.u32.lo = t_cancel.u32.lo = t_poll.u32.lo = 0;

	// Register timers
	for (j=0; j<(int)count; ++j) {
	    delay.sec = pj_rand() % DELAY;
	    delay.msec = pj_rand() % 1000;

	    // Schedule timer
	    pj_get_timestamp(&t1);
	    rc = pj_timer_heap_schedule(timer, &entry[j], &delay);
	    if (rc != 0)
		return -40;
	    pj_get_timestamp(&t2);

	    t_sched.u32.lo += (t2.u32.lo - t1.u32.lo);

	    // Poll timers.
	    pj_get_timestamp(&t1);
	    rc = pj_timer_heap_poll(timer, NULL);
	    pj_get_timestamp(&t2);
	    if (rc > 0) {
		t_poll.u32.lo += (t2.u32.lo - t1.u32.lo);
		early += rc;
	    }
	}

	// Set the time where all timers should finish
	pj_gettimeofday(&expire);
	delay.sec = DELAY; 
	delay.msec = 0;
	PJ_TIME_VAL_ADD(expire, delay);

	// Wait until all timers finish, cancelling some of them along the way.
	do {
	    int index = pj_rand() % count;
	    pj_get_timestamp(&t1);
	    rc = pj_timer_heap_cancel(timer, &entry[index]);
	    pj_get_timestamp(&t2);
	    if (rc > 0) {
		cancelled += rc;
		t_cancel.u32.lo += (t2.u32.lo - t1.u32.lo);
	    }

	    pj_gettimeofday(&now);

	    pj_get_timestamp(&t1);
#if defined(PJ_SYMBIAN) && PJ_SYMBIAN!=0
	    /* On Symbian, we must use OS poll (Active Scheduler poll) since 
	     * timer is implemented using Active Object.
	     */
	    rc = 0;
	    while (pj_symbianos_poll(-1, 0))
		++rc;
#else
	    rc = pj_timer_heap_poll(timer, NULL);
#endif
	    pj_get_timestamp(&t2);
	    if (rc > 0) {
		done += rc;
		t_poll.u32.lo += (t2.u32.lo - t1.u32.lo);
	    }

	} while (PJ_TIME_VAL_LTE(now, expire) && pj_timer_heap_count(timer) > 0);

	if (pj_timer_heap_count(timer)) {
	    PJ_LOG(3, (THIS_FILE, "ERROR: %d timers left", 
		       pj_timer_heap_count(timer)));
	    ++err;
	}
	t_sched.u32.lo /= count; 
	t_cancel.u32.lo /= count;
	t_poll.u32.lo /= count;
	PJ_LOG(4, (THIS_FILE, 
	        "...ok (count:%d, early:%d, cancelled:%d, "
		"sched:%d, cancel:%d poll:%d)", 
		count, early, cancelled, t_sched.u32.lo, t_cancel.u32.lo,
		t_poll.u32.lo));

	count = count * 2;
	if (count > MAX_COUNT)
	    break;
    }

    pj_pool_release(pool);
    return err;
}
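The test above assigns &timer_callback to every entry but does not show the function; a minimal no-op callback matching the pj_timer_heap_callback signature would look like this (a reconstruction for completeness, not necessarily the original):

static void timer_callback(pj_timer_heap_t *timer_heap, pj_timer_entry *entry)
{
    /* Nothing to do; the test only measures scheduling, cancelling and
     * polling overhead. */
    PJ_UNUSED_ARG(timer_heap);
    PJ_UNUSED_ARG(entry);
}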
Example #21
/*! \brief Helper function for changing the T.38 state */
static void t38_change_state(struct ast_sip_session *session, struct ast_sip_session_media *session_media,
	struct t38_state *state, enum ast_sip_session_t38state new_state)
{
	enum ast_sip_session_t38state old_state = session->t38state;
	struct ast_control_t38_parameters parameters = { .request_response = 0, };
	pj_time_val delay = { .sec = T38_AUTOMATIC_REJECTION_SECONDS };

	if (old_state == new_state) {
		return;
	}

	session->t38state = new_state;
	ast_debug(2, "T.38 state changed to '%u' from '%u' on channel '%s'\n",
		new_state, old_state,
		session->channel ? ast_channel_name(session->channel) : "<gone>");

	if (pj_timer_heap_cancel(pjsip_endpt_get_timer_heap(ast_sip_get_pjsip_endpoint()), &state->timer)) {
		ast_debug(2, "Automatic T.38 rejection on channel '%s' terminated\n",
			session->channel ? ast_channel_name(session->channel) : "<gone>");
		ao2_ref(session, -1);
	}

	if (!session->channel) {
		return;
	}

	switch (new_state) {
	case T38_PEER_REINVITE:
		ao2_ref(session, +1);
		if (pjsip_endpt_schedule_timer(ast_sip_get_pjsip_endpoint(), &state->timer, &delay) != PJ_SUCCESS) {
			ast_log(LOG_WARNING, "Scheduling of automatic T.38 rejection for channel '%s' failed\n",
				ast_channel_name(session->channel));
			ao2_ref(session, -1);
		}
		parameters = state->their_parms;
		parameters.max_ifp = ast_udptl_get_far_max_ifp(session_media->udptl);
		parameters.request_response = AST_T38_REQUEST_NEGOTIATE;
		ast_udptl_set_tag(session_media->udptl, "%s", ast_channel_name(session->channel));
		break;
	case T38_ENABLED:
		parameters = state->their_parms;
		parameters.max_ifp = ast_udptl_get_far_max_ifp(session_media->udptl);
		parameters.request_response = AST_T38_NEGOTIATED;
		ast_udptl_set_tag(session_media->udptl, "%s", ast_channel_name(session->channel));
		break;
	case T38_REJECTED:
	case T38_DISABLED:
		if (old_state == T38_ENABLED) {
			parameters.request_response = AST_T38_TERMINATED;
		} else if (old_state == T38_LOCAL_REINVITE) {
			parameters.request_response = AST_T38_REFUSED;
		}
		break;
	case T38_LOCAL_REINVITE:
		/* wait until we get a peer response before responding to local reinvite */
		break;
	case T38_MAX_ENUM:
		/* Well, that shouldn't happen */
		ast_assert(0);
		break;
	}

	if (parameters.request_response) {
		ast_queue_control_data(session->channel, AST_CONTROL_T38_PARAMETERS, &parameters, sizeof(parameters));
	}
}

/*! \brief Task function which rejects a T.38 re-invite and resumes handling it */
static int t38_automatic_reject(void *obj)
{
	RAII_VAR(struct ast_sip_session *, session, obj, ao2_cleanup);
	RAII_VAR(struct ast_datastore *, datastore, ast_sip_session_get_datastore(session, "t38"), ao2_cleanup);
	RAII_VAR(struct ast_sip_session_media *, session_media, ao2_find(session->media, "image", OBJ_KEY), ao2_cleanup);

	if (!datastore) {
		return 0;
	}

	ast_debug(2, "Automatically rejecting T.38 request on channel '%s'\n",
		session->channel ? ast_channel_name(session->channel) : "<gone>");

	t38_change_state(session, session_media, datastore->data, T38_REJECTED);
	ast_sip_session_resume_reinvite(session);

	return 0;
}
Example #22
/*
 * Callback from TURN session when state has changed
 */
static void turn_on_state(pj_turn_session *sess, 
			  pj_turn_state_t old_state,
			  pj_turn_state_t new_state)
{
    pj_turn_sock *turn_sock = (pj_turn_sock*) 
			   pj_turn_session_get_user_data(sess);
    pj_status_t status;

    if (turn_sock == NULL) {
	/* We've been destroyed */
	return;
    }

    /* Notify app first */
    if (turn_sock->cb.on_state) {
	(*turn_sock->cb.on_state)(turn_sock, old_state, new_state);
    }

    /* Make sure user hasn't destroyed us in the callback */
    if (turn_sock->sess && new_state == PJ_TURN_STATE_RESOLVED) {
	pj_turn_session_info info;
	pj_turn_session_get_info(turn_sock->sess, &info);
	new_state = info.state;
    }

    if (turn_sock->sess && new_state == PJ_TURN_STATE_RESOLVED) {
	/*
	 * Once server has been resolved, initiate outgoing TCP
	 * connection to the server.
	 */
	pj_turn_session_info info;
	char addrtxt[PJ_INET6_ADDRSTRLEN+8];
	int sock_type;
	pj_sock_t sock;
	pj_activesock_cb asock_cb;
    pj_activesock_cfg asock_cfg;

	/* Close existing connection, if any. This happens when
	 * we're switching to alternate TURN server when either TCP
	 * connection or ALLOCATE request failed.
	 */
	if (turn_sock->active_sock) {
		PJ_LOG(4, (THIS_FILE, "turn_on_state() Close connection for new_state == PJ_TURN_STATE_RESOLVED."));
	    pj_activesock_close(turn_sock->active_sock);
	    turn_sock->active_sock = NULL;
	}

	/* Get server address from session info */
	pj_turn_session_get_info(sess, &info);

	if (turn_sock->conn_type == PJ_TURN_TP_UDP)
	    sock_type = pj_SOCK_DGRAM();
	else
	    sock_type = pj_SOCK_STREAM();

	/* Init socket */
	status = pj_sock_socket(turn_sock->af, sock_type, 0, &sock);
	if (status != PJ_SUCCESS) {
		PJ_LOG(1, (THIS_FILE, "turn_on_state() Failed to destroy turn_sock for sock creation. status=[%d]", status));
	    pj_turn_sock_destroy(turn_sock);
	    return;
	}

	{
		int flag = turn_sock->setting.sock_recv_buf_size ? turn_sock->setting.sock_recv_buf_size : PJ_TCP_MAX_PKT_LEN;

		status = pj_sock_setsockopt(sock, pj_SOL_SOCKET(), pj_SO_RCVBUF(),
			&flag, sizeof(flag));
		if (status != PJ_SUCCESS) {
			PJ_LOG(2, (THIS_FILE, "turn_on_state() Failed to set SO_RCVBUF option. status=[%d]", status));
			return;
		}

		flag = turn_sock->setting.sock_send_buf_size ? turn_sock->setting.sock_send_buf_size : PJ_SOCKET_SND_BUFFER_SIZE;
		status = pj_sock_setsockopt(sock, pj_SOL_SOCKET(), pj_SO_SNDBUF(),
			&flag, sizeof(flag));
		if (status != PJ_SUCCESS) {
			PJ_LOG(2, (THIS_FILE, "turn_on_state() Failed to set SO_SNDBUF option. status=[%d]", status));
			return;
		}
	}

        /* Apply QoS, if specified */
	status = pj_sock_apply_qos2(sock, turn_sock->setting.qos_type,
				    &turn_sock->setting.qos_params, 
				    (turn_sock->setting.qos_ignore_error?2:1),
				    turn_sock->pool->obj_name, NULL);
	if (status != PJ_SUCCESS && !turn_sock->setting.qos_ignore_error) {
		PJ_LOG(1, (THIS_FILE, "turn_on_state() Failed to destroy turn_sock for pj_sock_apply_qos2. status=[%d]", status));
	    pj_turn_sock_destroy(turn_sock);
	    return;
	}

	/* Create active socket */
	pj_activesock_cfg_default(&asock_cfg);
	asock_cfg.concurrency = 1;
	asock_cfg.whole_data = PJ_TRUE;

    pj_bzero(&asock_cb, sizeof(asock_cb));
	asock_cb.on_data_read = &on_data_read;
	asock_cb.on_connect_complete = &on_connect_complete;
	status = pj_activesock_create(turn_sock->pool, sock,
				      sock_type, &asock_cfg,
				      turn_sock->cfg.ioqueue, &asock_cb, 
				      turn_sock,
				      &turn_sock->active_sock);
	if (status != PJ_SUCCESS) {
		PJ_LOG(1, (THIS_FILE, "turn_on_state() Failed to destroy turn_sock for pj_activesock_create. status=[%d]", status));
	    pj_turn_sock_destroy(turn_sock);
	    return;
	}

	PJ_LOG(5,(turn_sock->pool->obj_name,
		  "Connecting to %s", 
		  pj_sockaddr_print(&info.server, addrtxt, 
				    sizeof(addrtxt), 3)));

	/* Initiate non-blocking connect */
#if PJ_HAS_TCP
	status=pj_activesock_start_connect(turn_sock->active_sock, 
					   turn_sock->pool,
					   &info.server, 
					   pj_sockaddr_get_len(&info.server));
	if (status == PJ_SUCCESS) {
	    on_connect_complete(turn_sock->active_sock, PJ_SUCCESS);
	} else if (status != PJ_EPENDING) {
		PJ_LOG(1, (THIS_FILE, "turn_on_state() Failed to destroy turn_sock for pj_activesock_start_connect. status=[%d]", status));
	    pj_turn_sock_destroy(turn_sock);
	    return;
	}
#else
	on_connect_complete(turn_sock->active_sock, PJ_SUCCESS);
#endif

	/* Done for now. Subsequent work will be done in 
	 * on_connect_complete() callback.
	 */
    }

    if (new_state >= PJ_TURN_STATE_DESTROYING && turn_sock->sess) {
	pj_time_val delay = {0, 0};

	turn_sock->sess = NULL;
	pj_turn_session_set_user_data(sess, NULL);

	if (turn_sock->timer.id) {
	    pj_timer_heap_cancel(turn_sock->cfg.timer_heap, &turn_sock->timer);
	    turn_sock->timer.id = 0;
	}

	turn_sock->timer.id = TIMER_DESTROY;
	pj_timer_heap_schedule(turn_sock->cfg.timer_heap, &turn_sock->timer, 
			       &delay);
    }
}
Example #23
/*
 * Transmit message.
 */
static pj_status_t tsx_transmit_msg(pj_stun_client_tsx *tsx,
                                    pj_bool_t mod_count)
{
    pj_status_t status;

    PJ_ASSERT_RETURN(tsx->retransmit_timer.id == 0 ||
		     !tsx->require_retransmit, PJ_EBUSY);

    if (tsx->require_retransmit && mod_count) {
	/* Calculate retransmit/timeout delay */
	if (tsx->transmit_count == 0) {
	    tsx->retransmit_time.sec = 0;
	    tsx->retransmit_time.msec = tsx->rto_msec;

	} else if (tsx->transmit_count < PJ_STUN_MAX_TRANSMIT_COUNT-1) {
	    unsigned msec;

	    msec = PJ_TIME_VAL_MSEC(tsx->retransmit_time);
	    msec <<= 1;
	    tsx->retransmit_time.sec = msec / 1000;
	    tsx->retransmit_time.msec = msec % 1000;

	} else {
	    tsx->retransmit_time.sec = PJ_STUN_TIMEOUT_VALUE / 1000;
	    tsx->retransmit_time.msec = PJ_STUN_TIMEOUT_VALUE % 1000;
	}

	/* Schedule timer first because when send_msg() failed we can
	 * cancel it (as opposed to when schedule_timer() failed we cannot
	 * cancel transmission).
	 */;
	status = pj_timer_heap_schedule(tsx->timer_heap, 
					&tsx->retransmit_timer,
					&tsx->retransmit_time);
	if (status != PJ_SUCCESS) {
	    tsx->retransmit_timer.id = 0;
	    return status;
	}
	tsx->retransmit_timer.id = TIMER_ACTIVE;
    }


    if (mod_count)
	tsx->transmit_count++;

    PJ_LOG(4,(tsx->obj_name, "[%s] STUN sending message (transmit count=%d)",
	      tsx->obj_name, tsx->transmit_count));

    /* Send message */
    status = tsx->cb.on_send_msg(tsx, tsx->last_pkt, tsx->last_pkt_size);

    if (status == PJNATH_ESTUNDESTROYED) {
	/* We've been destroyed, don't access the object. */
    } else if (status != PJ_SUCCESS && status != PJ_EPENDING) {
	if (tsx->retransmit_timer.id != 0 && mod_count) {
	    pj_timer_heap_cancel(tsx->timer_heap, 
				 &tsx->retransmit_timer);
	    tsx->retransmit_timer.id = 0;
	}
	stun_perror(tsx, "STUN error sending message", status);
	return status;
    }

    return status;
}
Example #24
/*
 * Notify application and shutdown the TCP session.
 */
static void sess_shutdown(pj_tcp_session *sess,
                          pj_status_t status)
{
    pj_bool_t can_destroy = PJ_TRUE;

    PJ_LOG(4,(sess->obj_name, "Request to shutdown in state %s, cause:%d",
              state_names[sess->state], status));

    if (sess->last_status == PJ_SUCCESS && status != PJ_SUCCESS)
        sess->last_status = status;

    switch (sess->state) {
    case PJ_TCP_STATE_NULL:
        break;
    case PJ_TCP_STATE_RESOLVING:
        if (sess->dns_async != NULL) {
            pj_dns_srv_cancel_query(sess->dns_async, PJ_FALSE);
            sess->dns_async = NULL;
        }
        break;
    case PJ_TCP_STATE_RESOLVED:
        break;
    case PJ_TCP_STATE_CONNECTING:
        /* We need to wait until connection complete */
        sess->pending_destroy = PJ_TRUE;
        can_destroy = PJ_FALSE;
        break;
    case PJ_TCP_STATE_READY:
        /* Send REFRESH with LIFETIME=0 */
#if 0
        can_destroy = PJ_FALSE;
        send_refresh(sess, 0);
#endif
        break;
    case PJ_TCP_STATE_DISCONNECTING:
        can_destroy = PJ_FALSE;
        /* This may recursively call this function again with
         * state==PJ_TCP_STATE_DISCONNECTING.
         */
#if 0
        send_refresh(sess, 0);
#endif
        break;
    case PJ_TCP_STATE_DISCONNECTED:
    case PJ_TCP_STATE_DESTROYING:
        break;
    }

    if (can_destroy) {
        /* Schedule destroy */
        pj_time_val delay = {0, 0};

        pj_tcp_session_set_state(sess, PJ_TCP_STATE_DESTROYING);

        if (sess->timer.id != TIMER_NONE) {
            pj_timer_heap_cancel(sess->timer_heap, &sess->timer);
            sess->timer.id = TIMER_NONE;
        }

        sess->timer.id = TIMER_DESTROY;
        pj_timer_heap_schedule(sess->timer_heap, &sess->timer, &delay);
    }
}