Example 1
static void conn_close(struct sip_conn *conn, int err)
{
	struct le *le;

	conn->sc = mem_deref(conn->sc);
	conn->tc = mem_deref(conn->tc);
	tmr_cancel(&conn->tmr_ka);
	tmr_cancel(&conn->tmr);
	hash_unlink(&conn->he);

	le = list_head(&conn->ql);

	while (le) {

		struct sip_connqent *qent = le->data;
		le = le->next;

		if (qent->qentp) {
			*qent->qentp = NULL;
			qent->qentp = NULL;
		}

		qent->transph(err, qent->arg);
		list_unlink(&qent->le);
		mem_deref(qent);
	}

	sip_keepalive_signal(&conn->kal, err);
}
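This snippet embeds a struct tmr directly in the owning object, so tmr_cancel() in the close path is safe whether or not the timer is currently running. Below is a minimal sketch of that pattern, assuming the libre tmr_*/mem_* API within a running libre event loop; struct session, timeout() and the 5-second delay are purely illustrative.

#include <errno.h>
#include <re.h>

struct session {
	struct tmr tmr;        /* embedded timer, no separate allocation */
};

static void timeout(void *arg)
{
	struct session *sess = arg;
	(void)sess;

	/* handle the timeout; the timer is now inactive */
}

static void session_destructor(void *arg)
{
	struct session *sess = arg;

	/* safe even if the timer was never started or has already fired */
	tmr_cancel(&sess->tmr);
}

static int session_alloc(struct session **sessp)
{
	struct session *sess;

	sess = mem_zalloc(sizeof(*sess), session_destructor);
	if (!sess)
		return ENOMEM;

	tmr_init(&sess->tmr);
	tmr_start(&sess->tmr, 5000, timeout, sess);   /* fire in 5 s */

	*sessp = sess;

	return 0;
}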
Example 2
static void destructor(void *arg)
{
	struct sipsess_reply *reply = arg;

	list_unlink(&reply->le);
	tmr_cancel(&reply->tmr);
	tmr_cancel(&reply->tmrg);
	mem_deref((void *)reply->msg);
	mem_deref(reply->mb);
}
Example 3
void allocator_reset(struct allocator *allocator)
{
	if (!allocator)
		return;

	tmr_cancel(&allocator->tmr);
	tmr_cancel(&allocator->tmr_ui);
	tmr_cancel(&allocator->tmr_pace);
	list_flush(&allocator->allocl);
}
Example 4
static void
clear_connection( connecttab* c, struct timeval* tvP )
    {
    ClientData client_data;

    /* If we haven't actually sent the buffered response yet, do so now. */
    httpd_write_response( c->hc );

    if ( c->idle_read_timer != (Timer*) 0 )
	{
	tmr_cancel( c->idle_read_timer );
	c->idle_read_timer = 0;
	}
    if ( c->idle_send_timer != (Timer*) 0 )
	{
	tmr_cancel( c->idle_send_timer );
	c->idle_send_timer = 0;
	}
    if ( c->wakeup_timer != (Timer*) 0 )
	{
	tmr_cancel( c->wakeup_timer );
	c->wakeup_timer = 0;
	}

    /* This is our version of Apache's lingering_close() routine, which is
    ** their version of the often-broken SO_LINGER socket option.  For why
    ** this is necessary, see http://www.apache.org/docs/misc/fin_wait_2.html
    ** What we do is delay the actual closing for a few seconds, while reading
    ** any bytes that come over the connection.  However, we don't want to do
    ** this unless it's necessary, because it ties up a connection slot and
    ** file descriptor which means our maximum connection-handling rate
    ** is lower.  So, elsewhere we set a flag when we detect the few
    ** circumstances that make a lingering close necessary.  If the flag
    ** isn't set we do the real close now.
    */
    if ( c->hc->should_linger )
	{
	c->conn_state = CNST_LINGERING;
	fdwatch_del_fd( c->hc->conn_fd );
	fdwatch_add_fd( c->hc->conn_fd, c, FDW_READ );
	/* Make sure we are still in no-delay mode. */
	httpd_set_ndelay( c->hc->conn_fd );
	client_data.p = c;
	c->linger_timer = tmr_create(
	    tvP, linger_clear_connection, client_data, LINGER_TIME * 1000L, 0 );
	if ( c->linger_timer == (Timer*) 0 )
	    {
	    syslog( LOG_CRIT, "tmr_create(linger_clear_connection) failed" );
	    exit( 1 );
	    }
	}
    else
	really_clear_connection( c, tvP );
    }
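In this thttpd-style API, timers are separate heap objects returned by tmr_create(), so the owner must cancel a still-pending timer explicitly and clear its pointer, exactly as clear_connection() does above. The following is a hedged sketch of that create/cancel pairing, using only the timers.h interface visible in this example; my_timer, on_idle and the two helper functions are hypothetical names.

#include <sys/time.h>
#include <syslog.h>

#include "timers.h"

static Timer* my_timer = (Timer*) 0;

static void
on_idle( ClientData client_data, struct timeval* nowP )
    {
    /* One-shot timers are recycled by tmr_run() right after they fire,
    ** so the handler only has to forget the stale pointer.
    */
    my_timer = (Timer*) 0;
    }

static void
arm_idle_timer( struct timeval* nowP )
    {
    ClientData client_data;

    client_data.p = (void*) 0;
    my_timer = tmr_create( nowP, on_idle, client_data, 30 * 1000L, 0 );
    if ( my_timer == (Timer*) 0 )
	syslog( LOG_CRIT, "tmr_create(on_idle) failed" );
    }

static void
disarm_idle_timer( void )
    {
    if ( my_timer != (Timer*) 0 )
	{
	tmr_cancel( my_timer );
	my_timer = (Timer*) 0;
	}
    }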
Example 5
static void conn_destructor(void *arg)
{
	struct sip_conn *conn = arg;

	tmr_cancel(&conn->tmr_ka);
	tmr_cancel(&conn->tmr);
	list_flush(&conn->kal);
	list_flush(&conn->ql);
	hash_unlink(&conn->he);
	mem_deref(conn->sc);
	mem_deref(conn->tc);
	mem_deref(conn->mb);
}
Example 6
static void clear_connection(struct connect_s *conn, struct timeval *tv)
{
  ClientData client_data;

  if (conn->wakeup_timer != NULL)
    {
      tmr_cancel(conn->wakeup_timer);
      conn->wakeup_timer = 0;
    }

  /* This is our version of Apache's lingering_close() routine, which is
   * their version of the often-broken SO_LINGER socket option.  For why
   * this is necessary, see http://www.apache.org/docs/misc/fin_wait_2.html
   * What we do is delay the actual closing for a few seconds, while reading
   * any bytes that come over the connection.  However, we don't want to do
   * this unless it's necessary, because it ties up a connection slot and
   * file descriptor which means our maximum connection-handling rate is
   * lower.  So, elsewhere we set a flag when we detect the few
   * circumstances that make a lingering close necessary.  If the flag isn't
   * set we do the real close now.
   */

  if (conn->conn_state == CNST_LINGERING)
    {
      /* If we were already lingering, shut down for real */

      tmr_cancel(conn->linger_timer);
      conn->linger_timer      = NULL;
      conn->hc->should_linger = false;
    }
  else if (conn->hc->should_linger)
    {
      fdwatch_del_fd(fw, conn->hc->conn_fd);
      conn->conn_state = CNST_LINGERING;
      fdwatch_add_fd(fw, conn->hc->conn_fd, conn);
      client_data.p = conn;

      conn->linger_timer = tmr_create(tv, linger_clear_connection, client_data,
                                      CONFIG_THTTPD_LINGER_MSEC, 0);
      if (conn->linger_timer != NULL)
        {
          return;
        }

      nerr("ERROR: tmr_create(linger_clear_connection) failed\n");
    }

  /* Either we are done lingering, we shouldn't linger, or we failed to set up the linger */

  really_clear_connection(conn);
}
Example 7
static void destructor(void *arg)
{
	struct sip_ctrans *ct = arg;

	hash_unlink(&ct->he);
	tmr_cancel(&ct->tmr);
	tmr_cancel(&ct->tmre);
	mem_deref(ct->met);
	mem_deref(ct->branch);
	mem_deref(ct->qent);
	mem_deref(ct->req);
	mem_deref(ct->mb);
	mem_deref(ct->mb_ack);
}
Example 8
static void stop_tx(struct autx *tx, struct audio *a)
{
	if (!tx || !a)
		return;

	switch (a->cfg.txmode) {

#ifdef HAVE_PTHREAD
	case AUDIO_MODE_THREAD:
	case AUDIO_MODE_THREAD_REALTIME:
		if (tx->u.thr.run) {
			tx->u.thr.run = false;
			pthread_join(tx->u.thr.tid, NULL);
		}
		break;
#endif
	case AUDIO_MODE_TMR:
		tmr_cancel(&tx->u.tmr);
		break;

	default:
		break;
	}

	/* audio source must be stopped first */
	tx->ausrc = mem_deref(tx->ausrc);
	tx->aubuf = mem_deref(tx->aubuf);

	list_flush(&tx->filtl);
}
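The AUDIO_MODE_TMR case above relies on a self-rearming timer: libre timers are one-shot, so a periodic poll is implemented by calling tmr_start() again from inside the handler, and tmr_cancel() in the stop path breaks the cycle. A small sketch of that pattern follows, assuming the libre tmr API; poll_tx and the 20 ms interval are illustrative, not baresip's actual internals.

#include <re.h>

struct poller {
	struct tmr tmr;
};

static void poll_tx(void *arg)
{
	struct poller *p = arg;

	/* ...poll the audio buffer and send a packet here... */

	/* libre timers are one-shot: re-arm for the next tick */
	tmr_start(&p->tmr, 20, poll_tx, p);
}

static void poller_start(struct poller *p)
{
	tmr_start(&p->tmr, 20, poll_tx, p);
}

static void poller_stop(struct poller *p)
{
	/* cancelling breaks the re-arming cycle */
	tmr_cancel(&p->tmr);
}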
Example 9
static void destructor(void *arg)
{
	struct vidisp_st *st = arg;

	tmr_cancel(&st->tmr);
	sdl_reset(st);
}
Example 10
static int module_close(void)
{
    tmr_cancel(&tmr);
    list_flush(&mwil);

    return 0;
}
Example 11
static int req_connect(struct http_req *req)
{
	int err = EINVAL;

	while (req->srvc > 0) {

		--req->srvc;

		tmr_cancel(&req->tmr);
		req->sc = mem_deref(req->sc);
		req->tc = mem_deref(req->tc);
		req->mb = mem_deref(req->mb);

		err = tcp_connect(&req->tc, &req->srvv[req->srvc],
				  estab_handler, recv_handler, close_handler,
				  req);
		if (err)
			continue;

#ifdef USE_TLS
		if (req->secure) {

			err = tls_start_tcp(&req->sc, req->tls, req->tc, 0);
			if (err) {
				req->tc = mem_deref(req->tc);
				continue;
			}
		}
#endif
		tmr_start(&req->tmr, CONN_TIMEOUT, timeout_handler, req);
		break;
	}

	return err;
}
Example 12
static void destructor(void *arg)
{
	struct sipnot *not = arg;

	tmr_cancel(&not->tmr);

	if (!not->terminated) {

		if (terminate(not, SIPEVENT_DEACTIVATED))
			return;
	}

	hash_unlink(&not->he);
	mem_deref(not->req);
	mem_deref(not->dlg);
	mem_deref(not->auth);
	mem_deref(not->mb);
	mem_deref(not->event);
	mem_deref(not->id);
	mem_deref(not->cuser);
	mem_deref(not->hdrs);
	mem_deref(not->ctype);
	mem_deref(not->sock);
	mem_deref(not->sip);
}
Example 13
int sipevent_notify(struct sipnot *not, struct mbuf *mb,
		    enum sipevent_subst state, enum sipevent_reason reason,
		    uint32_t retry_after)
{
	if (!not || not->terminated)
		return EINVAL;

	if (mb || state != SIPEVENT_TERMINATED) {
		mem_deref(not->mb);
		not->mb = mem_ref(mb);
	}

	switch (state) {

	case SIPEVENT_ACTIVE:
	case SIPEVENT_PENDING:
		not->substate = state;
		return sipnot_notify(not);

	case SIPEVENT_TERMINATED:
		tmr_cancel(&not->tmr);
		not->retry_after = retry_after;
		(void)terminate(not, reason);
		return 0;

	default:
		return EINVAL;
	}
}
Example 14
void metric_reset(struct metric *metric)
{
	if (!metric)
		return;

	tmr_cancel(&metric->tmr);
}
Example 15
void tmr_run(struct timeval *nowP)
{
	int h;
	Timer *t;
	Timer *next;

	for (h = 0; h < HASH_SIZE; ++h)
		for (t = timers[h]; t; t = next) {
			next = t->next;
			/* Since the lists are sorted, as soon as we find a timer
			** that isn't ready yet, we can go on to the next list.
			*/
			if (t->time.tv_sec > nowP->tv_sec || (t->time.tv_sec == nowP->tv_sec &&
							      t->time.tv_usec > nowP->tv_usec))
				break;

			(t->timer_proc) (t->arg, nowP);
			if (t->periodic) {
				/* Reschedule. */
				t->time.tv_sec  +=  t->msecs / 1000L;
				t->time.tv_usec += (t->msecs % 1000L) * 1000L;
				if (t->time.tv_usec >= 1000000L) {
					t->time.tv_sec += t->time.tv_usec / 1000000L;
					t->time.tv_usec %= 1000000L;
				}
				l_resort(t);
			} else
				tmr_cancel(t);
		}
}
Example 16
static void call_destructor(void *arg)
{
	struct call *call = arg;

	if (call->state != STATE_IDLE)
		print_summary(call);

	call_stream_stop(call);
	list_unlink(&call->le);
	tmr_cancel(&call->tmr_dtmf);

	mem_deref(call->sess);
	mem_deref(call->local_uri);
	mem_deref(call->local_name);
	mem_deref(call->peer_uri);
	mem_deref(call->peer_name);
	mem_deref(call->audio);
#ifdef USE_VIDEO
	mem_deref(call->video);
	mem_deref(call->bfcp);
#endif
	mem_deref(call->sdp);
	mem_deref(call->mnats);
	mem_deref(call->mencs);
	mem_deref(call->sub);
	mem_deref(call->not);
	mem_deref(call->acc);
}
Example 17
void
tmr_run( struct timeval* nowP )
    {
    Timer* t;
    Timer* next;

    for ( t = timers; t != (Timer*) 0; t = next )
	{
	next = t->next;
	if ( t->time.tv_sec < nowP->tv_sec ||
	     ( t->time.tv_sec == nowP->tv_sec &&
	       t->time.tv_usec < nowP->tv_usec ) )
	    {
	    (t->timer_proc)( t->client_data, nowP );
	    if ( t->periodic )
		{
		/* Reschedule. */
		t->time.tv_sec += t->msecs / 1000L;
		t->time.tv_usec += ( t->msecs % 1000L ) * 1000L;
		if ( t->time.tv_usec >= 1000000L )
		    {
		    t->time.tv_sec += t->time.tv_usec / 1000000L;
		    t->time.tv_usec %= 1000000L;
		    }
		}
	    else
		tmr_cancel( t );
	    }
	}
    }
Example 18
void
tmr_destroy( void )
    {
    while ( timers != (Timer*) 0 )
	tmr_cancel( timers );
    tmr_cleanup();
    }
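The single-list tmr_run() and tmr_destroy() in Examples 17 and 18 are meant to be driven from the program's main loop. Here is a minimal sketch of such a loop; it assumes tmr_init() and tmr_mstimeout() come from the same timers module, and the actual I/O wait between the two gettimeofday() calls is elided.

#include <sys/time.h>

#include "timers.h"

int
main( int argc, char** argv )
    {
    struct timeval tv;
    long timeout_msecs;
    int done = 0;

    tmr_init();

    while ( ! done )
	{
	(void) gettimeofday( &tv, (struct timezone*) 0 );
	timeout_msecs = tmr_mstimeout( &tv );

	/* ...block in select()/poll() for at most timeout_msecs here... */

	(void) gettimeofday( &tv, (struct timezone*) 0 );
	tmr_run( &tv );
	}

    /* Cancel whatever is still pending and free the timer pools. */
    tmr_destroy();

    return 0;
    }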
Example 19
static void destructor(void *arg)
{
	struct bfcp_ctrans *ct = arg;

	list_unlink(&ct->le);
	tmr_cancel(&ct->tmr);
}
Example 20
static void destructor(void *arg)
{
	struct sipreg *reg = arg;

	tmr_cancel(&reg->tmr);

	if (!reg->terminated) {

		reg->resph = dummy_handler;
		reg->terminated = true;

		if (reg->req) {
			mem_ref(reg);
			return;
		}

		if (reg->registered && !request(reg, true)) {
			mem_ref(reg);
			return;
		}
	}

	mem_deref(reg->ka);
	mem_deref(reg->dlg);
	mem_deref(reg->auth);
	mem_deref(reg->cuser);
	mem_deref(reg->sip);
	mem_deref(reg->hdrs);
	mem_deref(reg->params);
}
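This destructor shows libre's deferred-destruction idiom: while the registration is still active it takes a fresh reference with mem_ref() and returns, so the object survives long enough to send the final un-REGISTER; the response handler later calls mem_deref() again and teardown completes. Below is a generic sketch of the idiom, assuming libre's reference counting, where mem_deref() re-checks the reference count after the destructor runs; struct job, job_done() and the busy flag are illustrative.

#include <errno.h>
#include <stdbool.h>
#include <re.h>

struct job {
	bool terminated;
	bool busy;             /* asynchronous shutdown still in flight */
};

void job_done(struct job *job)
{
	job->busy = false;

	/* drop the extra reference taken by the destructor; this calls
	 * the destructor again, which can now clean up for real */
	if (job->terminated)
		mem_deref(job);
}

static void job_destructor(void *arg)
{
	struct job *job = arg;

	if (!job->terminated) {

		job->terminated = true;

		if (job->busy) {
			/* keep the object alive until job_done() runs */
			mem_ref(job);
			return;
		}
	}

	/* final cleanup of owned resources would go here */
}

int job_alloc(struct job **jobp)
{
	struct job *job;

	job = mem_zalloc(sizeof(*job), job_destructor);
	if (!job)
		return ENOMEM;

	job->busy = true;      /* pretend an async operation was started */

	*jobp = job;

	return 0;
}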
Example 21
void
tmr_run(struct timeval *nowP)
{
  int h;
  Timer *t;
  Timer *next;

  for (h = 0; h < HASH_SIZE; ++h)
    for (t = timers[h]; t != (Timer *)0; t = next) {
      next = t->next;
      /* Since the lists are sorted, as soon as we find a timer
       ** that isn't ready yet, we can go on to the next list.
       */
      if (t->time.tv_sec > nowP->tv_sec || (t->time.tv_sec == nowP->tv_sec && t->time.tv_usec > nowP->tv_usec))
        break;

      /* Invalidate mstimeout cache, since we're modifying the queue */
      mstimeout_cache = -1;

      (t->timer_proc)(t->client_data, nowP);
      if (t->periodic) {
        /* Reschedule. */
        t->time.tv_sec += t->msecs / 1000L;
        t->time.tv_usec += (t->msecs % 1000L) * 1000L;
        if (t->time.tv_usec >= 1000000L) {
          t->time.tv_sec += t->time.tv_usec / 1000000L;
          t->time.tv_usec %= 1000000L;
        }
        l_resort(t);
      } else
        tmr_cancel(t);
    }
}
Example 22
static void invite_response(struct sip_ctrans *ct, const struct sip_msg *msg)
{
	switch (ct->state) {

	case CALLING:
		tmr_cancel(&ct->tmr);
		tmr_cancel(&ct->tmre);
		/*@fallthrough@*/
	case PROCEEDING:
		if (msg->scode < 200) {
			ct->state = PROCEEDING;
			ct->resph(0, msg, ct->arg);
		}
		else if (msg->scode < 300) {
			ct->resph(0, msg, ct->arg);
			mem_deref(ct);
		}
		else {
			ct->state = COMPLETED;

			(void)request_copy(&ct->mb_ack, ct, "ACK", msg);
			(void)sip_send(ct->sip, NULL, ct->tp, &ct->dst,
				       ct->mb_ack);

			ct->resph(0, msg, ct->arg);

			if (sip_transp_reliable(ct->tp)) {
				mem_deref(ct);
				break;
			}

			tmr_start(&ct->tmr, COMPLETE_WAIT, tmr_handler, ct);
		}
		break;

	case COMPLETED:
		if (msg->scode < 300)
			break;

		(void)sip_send(ct->sip, NULL, ct->tp, &ct->dst, ct->mb_ack);
		break;

	default:
		break;
	}
}
Example 23
static void destructor(void *arg)
{
	struct perm *perm = arg;

	tmr_cancel(&perm->tmr);
	mem_deref(perm->ct);
	hash_unlink(&perm->he);
}
Example 24
static int module_close(void)
{
	uag_event_unregister(ua_event_handler);
	tmr_cancel(&tmr);
	list_flush(&mwil);

	return 0;
}
Example 25
void allocator_stop_senders(struct allocator *allocator)
{
	struct le *le;

	if (!allocator)
		return;

	tmr_cancel(&allocator->tmr_ui);
	tmr_cancel(&allocator->tmr_pace);

	for (le = allocator->allocl.head; le; le = le->next) {
		struct allocation *alloc = le->data;

		sender_stop(alloc->sender);
	}

}
Example 26
static void session_destructor(void *arg)
{
	struct menc_sess *st = arg;

	tmr_cancel(&st->abort_timer);

	if (st->zrtp_session)
		zrtp_session_down(st->zrtp_session);
}
Example 27
static void destructor(void *arg)
{
	struct publisher *pub = arg;

	list_unlink(&pub->le);
	tmr_cancel(&pub->tmr);
	mem_deref(pub->ua);
	mem_deref(pub->etag);
}
Example 28
static void udpconn_close(struct sip_udpconn *uc, int err)
{
	sip_keepalive_signal(&uc->kal, err);
	hash_unlink(&uc->he);
	tmr_cancel(&uc->tmr_ka);
	uc->ct = mem_deref(uc->ct);
	uc->us = mem_deref(uc->us);
	uc->stun = mem_deref(uc->stun);
}
Example 29
static void destructor(void *arg)
{
	struct presence *pres = arg;

	list_unlink(&pres->le);
	tmr_cancel(&pres->tmr);
	mem_deref(pres->contact);
	mem_deref(pres->sub);
}
Example 30
static void tcpconn_destructor(void *arg)
{
	struct tcpconn *tc = arg;

	hash_unlink(&tc->le);
	tmr_cancel(&tc->tmr);
	mem_deref(tc->conn);
	mem_deref(tc->mb);
}