Example #1
PJ_DEF(pj_status_t) pj_sock_get_qos_params(pj_sock_t sock,
					   pj_qos_params *p_param)
{
    pj_status_t last_err = PJ_ENOTSUP;
    int val, optlen;
    pj_sockaddr sa;
    int salen = sizeof(sa);
    pj_status_t status;

    pj_bzero(p_param, sizeof(*p_param));

    /* Get DSCP/TOS value */
    status = pj_sock_getsockname(sock, &sa, &salen);
    if (status == PJ_SUCCESS) {
	optlen = sizeof(val);
	if (sa.addr.sa_family == pj_AF_INET()) {
	    status = pj_sock_getsockopt(sock, pj_SOL_IP(), pj_IP_TOS(), 
					&val, &optlen);
	} else if (sa.addr.sa_family == pj_AF_INET6()) {
	    status = pj_sock_getsockopt(sock, pj_SOL_IPV6(),
					pj_IPV6_TCLASS(),
					&val, &optlen);
	} else {
	    status = PJ_EINVAL;
	}

	if (status == PJ_SUCCESS) {
	    p_param->flags |= PJ_QOS_PARAM_HAS_DSCP;
	    p_param->dscp_val = (pj_uint8_t)(val >> 2);
	} else {
Example #2
PJ_DEF(pj_status_t) pj_sock_get_qos_params(pj_sock_t sock,
        pj_qos_params *p_param)
{
    pj_status_t status;
    int val, optlen;
    pj_sockaddr sa;
    int salen = sizeof(sa);

    PJ_ASSERT_RETURN(p_param, PJ_EINVAL);

    pj_bzero(p_param, sizeof(*p_param));

    /* Try SO_NET_SERVICE_TYPE */
    status = sock_get_net_service_type_params(sock, p_param);
    if (status != PJ_ENOTSUP)
        return status;

    /* Fall back to IP_TOS/IPV6_TCLASS */
    status = pj_sock_getsockname(sock, &sa, &salen);
    if (status != PJ_SUCCESS)
        return status;

    optlen = sizeof(val);
    if (sa.addr.sa_family == pj_AF_INET()) {
        status = pj_sock_getsockopt(sock, pj_SOL_IP(), pj_IP_TOS(),
                                    &val, &optlen);
    } else if (sa.addr.sa_family == pj_AF_INET6()) {
        status = pj_sock_getsockopt(sock, pj_SOL_IPV6(), pj_IPV6_TCLASS(),
                                    &val, &optlen);
    } else
        status = PJ_EINVAL;
    if (status == PJ_SUCCESS) {
        p_param->flags |= PJ_QOS_PARAM_HAS_DSCP;
        p_param->dscp_val = (pj_uint8_t)(val >> 2);
    }

    return status;
}
Example #3
/*
 * Adjust socket send/receive buffer size.
 */
PJ_DEF(pj_status_t) pj_sock_setsockopt_sobuf( pj_sock_t sockfd,
					      pj_uint16_t optname,
					      pj_bool_t auto_retry,
					      unsigned *buf_size)
{
    pj_status_t status;
    int try_size, cur_size, i, step, size_len;
    enum { MAX_TRY = 20 };

    PJ_CHECK_STACK();

    PJ_ASSERT_RETURN(sockfd != PJ_INVALID_SOCKET &&
		     buf_size &&
		     *buf_size > 0 &&
		     (optname == pj_SO_RCVBUF() ||
		      optname == pj_SO_SNDBUF()),
		     PJ_EINVAL);

    size_len = sizeof(cur_size);
    status = pj_sock_getsockopt(sockfd, pj_SOL_SOCKET(), optname,
				&cur_size, &size_len);
    if (status != PJ_SUCCESS)
	return status;

    try_size = *buf_size;
    step = (try_size - cur_size) / MAX_TRY;
    if (step < 4096)
	step = 4096;

    for (i = 0; i < (MAX_TRY-1); ++i) {
	if (try_size <= cur_size) {
	    /* Done, return current size */
	    *buf_size = cur_size;
	    break;
	}

	status = pj_sock_setsockopt(sockfd, pj_SOL_SOCKET(), optname,
				    &try_size, sizeof(try_size));
	if (status == PJ_SUCCESS) {
	    status = pj_sock_getsockopt(sockfd, pj_SOL_SOCKET(), optname,
					&cur_size, &size_len);
	    if (status != PJ_SUCCESS) {
		/* Oops! No info about current size, just return last try size
		 * and quit.
		 */
		*buf_size = try_size;
		break;
	    }
	}

	if (!auto_retry)
	    break;

	try_size -= step;
    }

    return status;
}
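A minimal usage sketch for the buffer-size helper above, assuming a socket already created with PJLIB; the 256 KB target, the helper name grow_rcvbuf and the log tag are illustrative only:

#include <pjlib.h>

/* Sketch: ask for a 256 KB receive buffer and let the helper step the
 * request down (auto_retry=PJ_TRUE) until the OS accepts a value.
 */
static pj_status_t grow_rcvbuf(pj_sock_t sock)
{
    unsigned size = 256 * 1024;   /* requested size in bytes */
    pj_status_t status;

    status = pj_sock_setsockopt_sobuf(sock, pj_SO_RCVBUF(),
                                      PJ_TRUE, &size);
    if (status == PJ_SUCCESS) {
        /* 'size' now holds the size the kernel actually granted. */
        PJ_LOG(4, ("app", "SO_RCVBUF set to %u bytes", size));
    }
    return status;
}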
Example #4
void ioqueue_dispatch_exception_event( pj_ioqueue_t *ioqueue, 
                                       pj_ioqueue_key_t *h )
{
    pj_bool_t has_lock;

    pj_mutex_lock(h->mutex);

    if (!h->connecting) {
        /* It is possible that more than one thread was woken up, thus
         * the remaining thread will see h->connecting as zero because
         * it has already been processed by another thread.
         */
        pj_mutex_unlock(h->mutex);
        return;
    }

    if (IS_CLOSING(h)) {
	pj_mutex_unlock(h->mutex);
	return;
    }

    /* Clear operation. */
    h->connecting = 0;

    ioqueue_remove_from_set(ioqueue, h, WRITEABLE_EVENT);
    ioqueue_remove_from_set(ioqueue, h, EXCEPTION_EVENT);

    /* Unlock; from this point we don't need to hold key's mutex
     * (unless concurrency is disabled, in which case we should
     * hold the mutex while calling the callback) */
    if (h->allow_concurrent) {
	/* concurrency may be changed while we're in the callback, so
	 * save it to a flag.
	 */
	has_lock = PJ_FALSE;
	pj_mutex_unlock(h->mutex);
    } else {
	has_lock = PJ_TRUE;
    }

    /* Call callback. */
    if (h->cb.on_connect_complete && !IS_CLOSING(h)) {
	pj_status_t status = -1;
#if (defined(PJ_HAS_SO_ERROR) && PJ_HAS_SO_ERROR!=0)
	int value;
	int vallen = sizeof(value);
	int gs_rc = pj_sock_getsockopt(h->fd, SOL_SOCKET, SO_ERROR, 
				       &value, &vallen);
	if (gs_rc == 0) {
	    status = PJ_RETURN_OS_ERROR(value);
	}
#endif

	(*h->cb.on_connect_complete)(h, status);
    }

    if (has_lock) {
	pj_mutex_unlock(h->mutex);
    }
}
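The SO_ERROR probe above can also be used directly by application code to turn a completed non-blocking connect() into a pj_status_t. A minimal sketch, assuming a platform where PJ_HAS_SO_ERROR is set so the native SOL_SOCKET/SO_ERROR macros are available; the helper name is hypothetical:

#include <pjlib.h>

/* Sketch: read the pending error of a socket whose non-blocking connect()
 * has completed, the same way the exception dispatcher above does.
 */
static pj_status_t check_connect_result(pj_sock_t fd)
{
    int value = 0;
    int vallen = sizeof(value);
    pj_status_t rc;

    rc = pj_sock_getsockopt(fd, SOL_SOCKET, SO_ERROR, &value, &vallen);
    if (rc != PJ_SUCCESS)
        return rc;      /* could not query the socket at all */

    /* SO_ERROR == 0 means the connect succeeded. */
    return (value == 0) ? PJ_SUCCESS : PJ_RETURN_OS_ERROR(value);
}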
Example #5
static pj_status_t ioqueue_init_key( pj_pool_t *pool,
                                     pj_ioqueue_t *ioqueue,
                                     pj_ioqueue_key_t *key,
                                     pj_sock_t sock,
                                     void *user_data,
                                     const pj_ioqueue_callback *cb)
{
    pj_status_t rc;
    int optlen;

    PJ_UNUSED_ARG(pool);

    key->ioqueue = ioqueue;
    key->fd = sock;
    key->user_data = user_data;
    pj_list_init(&key->read_list);
    pj_list_init(&key->write_list);
#if PJ_HAS_TCP
    pj_list_init(&key->accept_list);
    key->connecting = 0;
#endif

    /* Save callback. */
    pj_memcpy(&key->cb, cb, sizeof(pj_ioqueue_callback));

#if PJ_IOQUEUE_HAS_SAFE_UNREG
    /* Set initial reference count to 1 */
    pj_assert(key->ref_count == 0);
    ++key->ref_count;

    key->closing = 0;
#endif

    rc = pj_ioqueue_set_concurrency(key, ioqueue->default_concurrency);
    if (rc != PJ_SUCCESS)
	return rc;

    /* Get socket type. When the socket type is datagram, some optimizations
     * are performed during send to allow parallel send operations.
     */
    optlen = sizeof(key->fd_type);
    rc = pj_sock_getsockopt(sock, pj_SOL_SOCKET(), pj_SO_TYPE(),
                            &key->fd_type, &optlen);
    if (rc != PJ_SUCCESS)
        key->fd_type = pj_SOCK_STREAM();

    /* Create mutex for the key. */
#if !PJ_IOQUEUE_HAS_SAFE_UNREG
    rc = pj_mutex_create_simple(pool, NULL, &key->mutex);
#endif
    
    return rc;
}
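The SO_TYPE query above can also be used on its own to tell datagram sockets from stream sockets; a small sketch with a hypothetical helper name:

#include <pjlib.h>

/* Sketch: return PJ_TRUE if 'sock' is a datagram socket, using the same
 * SO_TYPE query as ioqueue_init_key() above.
 */
static pj_bool_t sock_is_dgram(pj_sock_t sock)
{
    int type = 0;
    int optlen = sizeof(type);

    if (pj_sock_getsockopt(sock, pj_SOL_SOCKET(), pj_SO_TYPE(),
                           &type, &optlen) != PJ_SUCCESS)
    {
        return PJ_FALSE;    /* be conservative if the query fails */
    }
    return (type == pj_SOCK_DGRAM()) ? PJ_TRUE : PJ_FALSE;
}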
Example #6
static pj_status_t sock_get_net_service_type(pj_sock_t sock, int *p_val)
{
    pj_status_t status;
    int optlen = sizeof(*p_val);

    PJ_ASSERT_RETURN(p_val, PJ_EINVAL);

    status = pj_sock_getsockopt(sock, pj_SOL_SOCKET(), SO_NET_SERVICE_TYPE,
                                p_val, &optlen);
    if (status == PJ_STATUS_FROM_OS(OSERR_ENOPROTOOPT))
        status = PJ_ENOTSUP;

    return status;
}
Example #7
PJ_DEF(pj_status_t) pj_sock_get_qos_params(pj_sock_t sock,
					   pj_qos_params *p_param)
{
    pj_status_t last_err = PJ_ENOTSUP;
    int val, optlen;
    pj_status_t status;

    pj_bzero(p_param, sizeof(*p_param));

    /* Get DSCP/TOS value */
    optlen = sizeof(val);
    status = pj_sock_getsockopt(sock, pj_SOL_IP(), pj_IP_TOS(), 
				&val, &optlen);
    if (status == PJ_SUCCESS) {
	p_param->flags |= PJ_QOS_PARAM_HAS_DSCP;
	p_param->dscp_val = (pj_uint8_t)(val >> 2);
    } else {
Example #8
PJ_DEF(pj_status_t) pj_sock_get_qos_type(pj_sock_t sock,
					 pj_qos_type *p_type)
{
    pj_status_t status;
    int value, optlen;
    unsigned i;

    optlen = sizeof(value);
    value = 0;
    status = pj_sock_getsockopt(sock, IPPROTO_IP, IP_DSCP_TRAFFIC_TYPE,
			        &value, &optlen);
    if (status != PJ_SUCCESS)
	return status;

    *p_type = PJ_QOS_TYPE_BEST_EFFORT;
    for (i=0; i<PJ_ARRAY_SIZE(dscp_map); ++i) {
	if (value == dscp_map[i]) {
	    *p_type = (pj_qos_type)i;
	    break;
	}
    }

    return PJ_SUCCESS;
}
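A short usage sketch for the two QoS getters, assuming a socket already created with pj_sock_socket(); the helper name and log tag are illustrative:

#include <pjlib.h>

/* Sketch: read back the QoS settings of a socket and log the DSCP value
 * (when the platform reports one) and the mapped QoS type.
 */
static void dump_qos(pj_sock_t sock)
{
    pj_qos_params params;
    pj_qos_type type;

    if (pj_sock_get_qos_params(sock, &params) == PJ_SUCCESS &&
        (params.flags & PJ_QOS_PARAM_HAS_DSCP))
    {
        PJ_LOG(4, ("app", "DSCP value: %u", (unsigned)params.dscp_val));
    }

    if (pj_sock_get_qos_type(sock, &type) == PJ_SUCCESS)
        PJ_LOG(4, ("app", "QoS type: %d", (int)type));
}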
Example #9
File: sock.hpp  Project: 12flying/pjsip
    //
    // getsockopt.
    //
    pj_status_t getsockopt(pj_uint16_t level, pj_uint16_t optname, 
                           void *optval, int *optlen)
    {
	return pj_sock_getsockopt(sock_, level, optname, optval, optlen);
    }
Example #10
/*
 * ioqueue_dispatch_event()
 *
 * Report occurrence of an event in the key to be processed by the
 * framework.
 */
void ioqueue_dispatch_write_event(pj_ioqueue_t *ioqueue, pj_ioqueue_key_t *h)
{
    /* Lock the key. */
    pj_mutex_lock(h->mutex);

    if (IS_CLOSING(h)) {
	pj_mutex_unlock(h->mutex);
	return;
    }

#if defined(PJ_HAS_TCP) && PJ_HAS_TCP!=0
    if (h->connecting) {
	/* Completion of connect() operation */
	pj_ssize_t bytes_transfered;
	pj_bool_t has_lock;

	/* Clear operation. */
	h->connecting = 0;

        ioqueue_remove_from_set(ioqueue, h, WRITEABLE_EVENT);
        ioqueue_remove_from_set(ioqueue, h, EXCEPTION_EVENT);


#if (defined(PJ_HAS_SO_ERROR) && PJ_HAS_SO_ERROR!=0)
	/* from connect(2): 
	 * On Linux, use getsockopt to read the SO_ERROR option at
	 * level SOL_SOCKET to determine whether connect() completed
	 * successfully (if SO_ERROR is zero).
	 */
	{
	  int value;
	  int vallen = sizeof(value);
	  int gs_rc = pj_sock_getsockopt(h->fd, SOL_SOCKET, SO_ERROR, 
					 &value, &vallen);
	  if (gs_rc != 0) {
	    /* Argh!! What to do now??? 
	     * Just indicate that the socket is connected. The
	     * application will get an error as soon as it tries to use
	     * the socket to send/receive.
	     */
	    bytes_transfered = 0;
	  } else {
            bytes_transfered = value;
	  }
 	}
#elif defined(PJ_WIN32) && PJ_WIN32!=0
	bytes_transfered = 0; /* success */
#else
	/* Excellent information on D.J. Bernstein's page:
	 * http://cr.yp.to/docs/connect.html
	 *
	 * Seems like the most portable way of detecting connect()
	 * failure is to call getpeername(). If socket is connected,
	 * getpeername() will return 0. If the socket is not connected,
	 * it will return ENOTCONN, and read(fd, &ch, 1) will produce
	 * the right errno through error slippage. This is a combination
	 * of suggestions from Douglas C. Schmidt and Ken Keys.
	 */
	{
	    int gp_rc;
	    struct sockaddr_in addr;
	    socklen_t addrlen = sizeof(addr);

	    gp_rc = getpeername(h->fd, (struct sockaddr*)&addr, &addrlen);
	    bytes_transfered = (gp_rc < 0) ? gp_rc : -gp_rc;
	}
#endif

        /* Unlock; from this point we don't need to hold key's mutex
	 * (unless concurrency is disabled, in which case we should
	 * hold the mutex while calling the callback) */
	if (h->allow_concurrent) {
	    /* concurrency may be changed while we're in the callback, so
	     * save it to a flag.
	     */
	    has_lock = PJ_FALSE;
	    pj_mutex_unlock(h->mutex);
	} else {
	    has_lock = PJ_TRUE;
	}

	/* Call callback. */
        if (h->cb.on_connect_complete && !IS_CLOSING(h))
	    (*h->cb.on_connect_complete)(h, bytes_transfered);

	/* Unlock if we still hold the lock */
	if (has_lock) {
	    pj_mutex_unlock(h->mutex);
	}

        /* Done. */

    } else 
#endif /* PJ_HAS_TCP */
    if (key_has_pending_write(h)) {
	/* Socket is writable. */
        struct write_operation *write_op;
        pj_ssize_t sent;
        pj_status_t send_rc;

        /* Get the first in the queue. */
        write_op = h->write_list.next;

        /* For datagrams, we can remove the write_op from the list
         * so that send() can work in parallel.
         */
        if (h->fd_type == pj_SOCK_DGRAM()) {
            pj_list_erase(write_op);

            if (pj_list_empty(&h->write_list))
                ioqueue_remove_from_set(ioqueue, h, WRITEABLE_EVENT);

        }

        /* Send the data. 
         * Unfortunately we must do this while holding key's mutex, thus
         * preventing parallel write on a single key.. :-((
         */
        sent = write_op->size - write_op->written;
        if (write_op->op == PJ_IOQUEUE_OP_SEND) {
            send_rc = pj_sock_send(h->fd, write_op->buf+write_op->written,
                                   &sent, write_op->flags);
	    /* Can't do this. We only clear "op" after we're finished sending
	     * the whole buffer.
	     */
	    //write_op->op = 0;
        } else if (write_op->op == PJ_IOQUEUE_OP_SEND_TO) {
            send_rc = pj_sock_sendto(h->fd, 
                                     write_op->buf+write_op->written,
                                     &sent, write_op->flags,
                                     &write_op->rmt_addr, 
                                     write_op->rmt_addrlen);
	    /* Can't do this. We only clear "op" after we're finished sending
	     * the whole buffer.
	     */
	    //write_op->op = 0;
        } else {
            pj_assert(!"Invalid operation type!");
	    write_op->op = PJ_IOQUEUE_OP_NONE;
            send_rc = PJ_EBUG;
        }

        if (send_rc == PJ_SUCCESS) {
            write_op->written += sent;
        } else {
            pj_assert(send_rc > 0);
            write_op->written = -send_rc;
        }

        /* Are we finished with this buffer? */
        if (send_rc!=PJ_SUCCESS || 
            write_op->written == (pj_ssize_t)write_op->size ||
            h->fd_type == pj_SOCK_DGRAM()) 
        {
	    pj_bool_t has_lock;

	    write_op->op = PJ_IOQUEUE_OP_NONE;

            if (h->fd_type != pj_SOCK_DGRAM()) {
                /* Write completion of the whole stream. */
                pj_list_erase(write_op);

                /* Clear operation if there's no more data to send. */
                if (pj_list_empty(&h->write_list))
                    ioqueue_remove_from_set(ioqueue, h, WRITEABLE_EVENT);

            }

	    /* Unlock; from this point we don't need to hold key's mutex
	     * (unless concurrency is disabled, in which case we should
	     * hold the mutex while calling the callback) */
	    if (h->allow_concurrent) {
		/* concurrency may be changed while we're in the callback, so
		 * save it to a flag.
		 */
		has_lock = PJ_FALSE;
		pj_mutex_unlock(h->mutex);
	    } else {
		has_lock = PJ_TRUE;
	    }

	    /* Call callback. */
            if (h->cb.on_write_complete && !IS_CLOSING(h)) {
	        (*h->cb.on_write_complete)(h, 
                                           (pj_ioqueue_op_key_t*)write_op,
                                           write_op->written);
            }

	    if (has_lock) {
		pj_mutex_unlock(h->mutex);
	    }

        } else {
            pj_mutex_unlock(h->mutex);
        }

        /* Done. */
    } else {
        /*
         * This is normal; execution may fall here when multiple threads
         * are signalled for the same event, but only one thread is
         * eventually able to process the event.
         */
        pj_mutex_unlock(h->mutex);
    }
}