/* * pj_ioqueue_register_sock() * * Register socket handle to ioqueue. */ PJ_DEF(pj_status_t) pj_ioqueue_register_sock( pj_pool_t *pool, pj_ioqueue_t *ioqueue, pj_sock_t sock, void *user_data, const pj_ioqueue_callback *cb, pj_ioqueue_key_t **p_key) { pj_ioqueue_key_t *key = NULL; #if defined(PJ_WIN32) && PJ_WIN32!=0 || \ defined(PJ_WIN32_WINCE) && PJ_WIN32_WINCE!=0 u_long value; #else pj_uint32_t value; #endif pj_status_t rc = PJ_SUCCESS; PJ_ASSERT_RETURN(pool && ioqueue && sock != PJ_INVALID_SOCKET && cb && p_key, PJ_EINVAL); pj_lock_acquire(ioqueue->lock); if (ioqueue->count >= ioqueue->max) { rc = PJ_ETOOMANY; goto on_return; } /* If safe unregistration (PJ_IOQUEUE_HAS_SAFE_UNREG) is used, get * the key from the free list. Otherwise allocate a new one. */ #if PJ_IOQUEUE_HAS_SAFE_UNREG /* Scan closing_keys first to let them come back to free_list */ scan_closing_keys(ioqueue); pj_assert(!pj_list_empty(&ioqueue->free_list)); if (pj_list_empty(&ioqueue->free_list)) { rc = PJ_ETOOMANY; goto on_return; } key = ioqueue->free_list.next; pj_list_erase(key); #else key = (pj_ioqueue_key_t*)pj_pool_zalloc(pool, sizeof(pj_ioqueue_key_t)); #endif rc = ioqueue_init_key(pool, ioqueue, key, sock, user_data, cb); if (rc != PJ_SUCCESS) { key = NULL; goto on_return; } /* Set socket to nonblocking. */ value = 1; #if defined(PJ_WIN32) && PJ_WIN32!=0 || \ defined(PJ_WIN32_WINCE) && PJ_WIN32_WINCE!=0 if (ioctlsocket(sock, FIONBIO, &value)) { #else if (ioctl(sock, FIONBIO, &value)) { #endif rc = pj_get_netos_error(); goto on_return; } /* Put in active list. */ pj_list_insert_before(&ioqueue->active_list, key); ++ioqueue->count; /* Rescan fdset to get max descriptor */ rescan_fdset(ioqueue); on_return: /* On error, socket may be left in non-blocking mode. */ *p_key = key; pj_lock_release(ioqueue->lock); return rc; } #if PJ_IOQUEUE_HAS_SAFE_UNREG /* Increment key's reference counter */ static void increment_counter(pj_ioqueue_key_t *key) { pj_mutex_lock(key->ioqueue->ref_cnt_mutex); ++key->ref_count; pj_mutex_unlock(key->ioqueue->ref_cnt_mutex); }
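/* Example (not from the original source): a minimal sketch of how a UDP
 * socket might be registered to the ioqueue with pj_ioqueue_register_sock()
 * above. The pool, the callback name on_udp_read() and the port number are
 * illustrative assumptions.
 */
#include <pjlib.h>

static void on_udp_read(pj_ioqueue_key_t *key,
                        pj_ioqueue_op_key_t *op_key,
                        pj_ssize_t bytes_read);

static pj_status_t register_udp_socket(pj_pool_t *pool,
                                       pj_ioqueue_t *ioqueue,
                                       pj_ioqueue_key_t **p_key)
{
    pj_ioqueue_callback cb;
    pj_sock_t sock;
    pj_status_t status;

    /* Create and bind a UDP socket. */
    status = pj_sock_socket(pj_AF_INET(), pj_SOCK_DGRAM(), 0, &sock);
    if (status != PJ_SUCCESS)
        return status;
    status = pj_sock_bind_in(sock, PJ_INADDR_ANY, 5000);
    if (status != PJ_SUCCESS) {
        pj_sock_close(sock);
        return status;
    }

    /* Only the callbacks we care about need to be set. */
    pj_bzero(&cb, sizeof(cb));
    cb.on_read_complete = &on_udp_read;

    /* The socket is switched to non-blocking mode by the ioqueue. */
    status = pj_ioqueue_register_sock(pool, ioqueue, sock, NULL, &cb, p_key);
    if (status != PJ_SUCCESS)
        pj_sock_close(sock);
    return status;
}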
/*
 * pj_ioqueue_unregister()
 */
PJ_DEF(pj_status_t) pj_ioqueue_unregister( pj_ioqueue_key_t *key )
{
    unsigned i;
    pj_bool_t has_lock;
    enum { RETRY = 10 };

    PJ_ASSERT_RETURN(key, PJ_EINVAL);

#if PJ_HAS_TCP
    if (key->connecting) {
        unsigned pos;
        pj_ioqueue_t *ioqueue;

        ioqueue = key->ioqueue;

        /* Erase from connecting_handles */
        pj_lock_acquire(ioqueue->lock);
        for (pos=0; pos < ioqueue->connecting_count; ++pos) {
            if (ioqueue->connecting_keys[pos] == key) {
                erase_connecting_socket(ioqueue, pos);
                break;
            }
        }
        key->connecting = 0;
        pj_lock_release(ioqueue->lock);
    }
#endif

#if PJ_IOQUEUE_HAS_SAFE_UNREG
    /* Mark key as closing before closing the handle. */
    key->closing = 1;

    /* If concurrency is disabled, wait until the key has finished
     * processing the callback.
     */
    if (key->allow_concurrent == PJ_FALSE) {
        pj_mutex_lock(key->mutex);
        has_lock = PJ_TRUE;
    } else {
        has_lock = PJ_FALSE;
    }
#else
    PJ_UNUSED_ARG(has_lock);
#endif

    /* Close the handle (the only way to disassociate the handle from IOCP).
     * We also need to close the handle to make sure that no further events
     * will come to it.
     */
    /* Update 2008/07/18 (http://trac.pjsip.org/repos/ticket/575):
     *  - It seems that CloseHandle() in itself does not actually close
     *    the socket (i.e. it will still appear in "netstat" output). Also,
     *    if we only use CloseHandle(), an "Invalid Handle" exception will
     *    be raised in WSACleanup().
     *  - MSDN documentation says that CloseHandle() must be called after
     *    the closesocket() call (see
     *    http://msdn.microsoft.com/en-us/library/ms724211(VS.85).aspx).
     *    But it turns out that this will raise an "Invalid Handle" exception
     *    in debug mode.
     * Because of this, we replaced CloseHandle() with closesocket().
     * This was tested on WinXP SP2.
     */
    //CloseHandle(key->hnd);
    pj_sock_close((pj_sock_t)key->hnd);

    /* Reset callbacks */
    key->cb.on_accept_complete = NULL;
    key->cb.on_connect_complete = NULL;
    key->cb.on_read_complete = NULL;
    key->cb.on_write_complete = NULL;

#if PJ_IOQUEUE_HAS_SAFE_UNREG
    /* Even after the handle is closed, I suspect that IOCP may still try to
     * do something with it, causing memory corruption when pool debugging
     * is enabled.
     *
     * Forcing a context switch seems to have fixed that, but this is quite
     * an ugly solution..
     *
     * Update 2008/02/13:
     *  This should not happen if concurrency is disallowed for the key.
     *  So at least the application has a workaround for this (i.e. by
     *  disallowing concurrency in the key).
     */
    //The wait below would loop forever if unregistration is done from the
    //callback. Bounding it with RETRY should still give IOCP a chance to
    //signal the socket, without causing a deadlock.
    //while (pj_atomic_get(key->ref_count) != 1)
    //    pj_thread_sleep(0);
    for (i=0; pj_atomic_get(key->ref_count) != 1 && i<RETRY; ++i)
        pj_thread_sleep(0);

    /* Decrement reference counter to destroy the key. */
    decrement_counter(key);

    if (has_lock)
        pj_mutex_unlock(key->mutex);
#endif

    return PJ_SUCCESS;
}
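/* A hedged sketch (not from the original source) of the application-level
 * pattern that the unregistration test further below follows when calling
 * pj_ioqueue_unregister() from outside the callback: the application guards
 * its own state with a mutex and a flag, so that a callback racing with the
 * unregistration can detect it. The names app_mutex, app_unregistered and
 * app_key are illustrative.
 */
static pj_mutex_t       *app_mutex;
static pj_bool_t         app_unregistered;
static pj_ioqueue_key_t *app_key;

static void unregister_from_app_thread(void)
{
    pj_mutex_lock(app_mutex);
    app_unregistered = PJ_TRUE;
    pj_ioqueue_unregister(app_key);
    pj_mutex_unlock(app_mutex);
}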
/* Callback to be called to handle new incoming requests. */ static pj_bool_t proxy_on_rx_request( pjsip_rx_data *rdata ) { pjsip_transaction *uas_tsx, *uac_tsx; struct uac_data *uac_data; struct uas_data *uas_data; pjsip_tx_data *tdata; pj_status_t status; if (rdata->msg_info.msg->line.req.method.id != PJSIP_CANCEL_METHOD) { /* Verify incoming request */ status = proxy_verify_request(rdata); if (status != PJ_SUCCESS) { app_perror("RX invalid request", status); return PJ_TRUE; } /* * Request looks sane, next clone the request to create transmit data. */ status = pjsip_endpt_create_request_fwd(global.endpt, rdata, NULL, NULL, 0, &tdata); if (status != PJ_SUCCESS) { pjsip_endpt_respond_stateless(global.endpt, rdata, PJSIP_SC_INTERNAL_SERVER_ERROR, NULL, NULL, NULL); return PJ_TRUE; } /* Process routing */ status = proxy_process_routing(tdata); if (status != PJ_SUCCESS) { app_perror("Error processing route", status); return PJ_TRUE; } /* Calculate target */ status = proxy_calculate_target(rdata, tdata); if (status != PJ_SUCCESS) { app_perror("Error calculating target", status); return PJ_TRUE; } /* Everything is set to forward the request. */ /* If this is an ACK request, forward statelessly. * This happens if the proxy records route and this ACK * is sent for 2xx response. An ACK that is sent for non-2xx * final response will be absorbed by transaction layer, and * it will not be received by on_rx_request() callback. */ if (tdata->msg->line.req.method.id == PJSIP_ACK_METHOD) { status = pjsip_endpt_send_request_stateless(global.endpt, tdata, NULL, NULL); if (status != PJ_SUCCESS) { app_perror("Error forwarding request", status); return PJ_TRUE; } return PJ_TRUE; } /* Create UAC transaction for forwarding the request. * Set our module as the transaction user to receive further * events from this transaction. */ status = pjsip_tsx_create_uac(&mod_tu, tdata, &uac_tsx); if (status != PJ_SUCCESS) { pjsip_tx_data_dec_ref(tdata); pjsip_endpt_respond_stateless(global.endpt, rdata, PJSIP_SC_INTERNAL_SERVER_ERROR, NULL, NULL, NULL); return PJ_TRUE; } /* Create UAS transaction to handle incoming request */ status = pjsip_tsx_create_uas(&mod_tu, rdata, &uas_tsx); if (status != PJ_SUCCESS) { pjsip_tx_data_dec_ref(tdata); pjsip_endpt_respond_stateless(global.endpt, rdata, PJSIP_SC_INTERNAL_SERVER_ERROR, NULL, NULL, NULL); pjsip_tsx_terminate(uac_tsx, PJSIP_SC_INTERNAL_SERVER_ERROR); return PJ_TRUE; } /* Feed the request to the UAS transaction to drive it's state * out of NULL state. */ pjsip_tsx_recv_msg(uas_tsx, rdata); /* Attach a data to the UAC transaction, to be used to find the * UAS transaction when we receive response in the UAC side. */ uac_data = (struct uac_data*) pj_pool_alloc(uac_tsx->pool, sizeof(struct uac_data)); uac_data->uas_tsx = uas_tsx; uac_tsx->mod_data[mod_tu.id] = (void*)uac_data; /* Attach data to the UAS transaction, to find the UAC transaction * when cancelling INVITE request. */ uas_data = (struct uas_data*) pj_pool_alloc(uas_tsx->pool, sizeof(struct uas_data)); uas_data->uac_tsx = uac_tsx; uas_tsx->mod_data[mod_tu.id] = (void*)uas_data; /* Everything is setup, forward the request */ status = pjsip_tsx_send_msg(uac_tsx, tdata); if (status != PJ_SUCCESS) { pjsip_tx_data *err_res; /* Fail to send request, for some reason */ /* Destroy transmit data */ pjsip_tx_data_dec_ref(tdata); /* I think UAC transaction should have been destroyed when * it fails to send request, so no need to destroy it. 
pjsip_tsx_terminate(uac_tsx, PJSIP_SC_INTERNAL_SERVER_ERROR); */ /* Send 500/Internal Server Error to UAS transaction */ pjsip_endpt_create_response(global.endpt, rdata, 500, NULL, &err_res); pjsip_tsx_send_msg(uas_tsx, err_res); return PJ_TRUE; } /* Send 100/Trying if this is an INVITE */ if (rdata->msg_info.msg->line.req.method.id == PJSIP_INVITE_METHOD) { pjsip_tx_data *res100; pjsip_endpt_create_response(global.endpt, rdata, 100, NULL, &res100); pjsip_tsx_send_msg(uas_tsx, res100); } } else { /* This is CANCEL request */ pjsip_transaction *invite_uas; struct uas_data *uas_data; pj_str_t key; /* Find the UAS INVITE transaction */ pjsip_tsx_create_key(rdata->tp_info.pool, &key, PJSIP_UAS_ROLE, pjsip_get_invite_method(), rdata); invite_uas = pjsip_tsx_layer_find_tsx(&key, PJ_TRUE); if (!invite_uas) { /* Invite transaction not found, respond CANCEL with 481 */ pjsip_endpt_respond_stateless(global.endpt, rdata, 481, NULL, NULL, NULL); return PJ_TRUE; } /* Respond 200 OK to CANCEL */ pjsip_endpt_respond(global.endpt, NULL, rdata, 200, NULL, NULL, NULL, NULL); /* Send CANCEL to cancel the UAC transaction. * The UAS INVITE transaction will get final response when * we receive final response from the UAC INVITE transaction. */ uas_data = (struct uas_data*) invite_uas->mod_data[mod_tu.id]; if (uas_data->uac_tsx && uas_data->uac_tsx->status_code < 200) { pjsip_tx_data *cancel; pj_mutex_lock(uas_data->uac_tsx->mutex); pjsip_endpt_create_cancel(global.endpt, uas_data->uac_tsx->last_tx, &cancel); pjsip_endpt_send_request(global.endpt, cancel, -1, NULL, NULL); pj_mutex_unlock(uas_data->uac_tsx->mutex); } /* Unlock UAS tsx because it is locked in find_tsx() */ pj_mutex_unlock(invite_uas->mutex); } return PJ_TRUE; }
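/* For reference, a sketch (an assumption, not copied from the sample source)
 * of the minimal shape of the per-transaction data used above; only the
 * members actually dereferenced in this excerpt are shown.
 */
struct uac_data
{
    pjsip_transaction *uas_tsx;   /* peer UAS transaction */
};

struct uas_data
{
    pjsip_transaction *uac_tsx;   /* peer UAC transaction */
};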
/* * pj_ioqueue_recvfrom() * * Start asynchronous recvfrom() from the socket. */ PJ_DEF(pj_status_t) pj_ioqueue_recvfrom( pj_ioqueue_key_t *key, pj_ioqueue_op_key_t *op_key, void *buffer, pj_ssize_t *length, unsigned flags, pj_sockaddr_t *addr, int *addrlen) { struct read_operation *read_op; PJ_ASSERT_RETURN(key && op_key && buffer && length, PJ_EINVAL); PJ_CHECK_STACK(); /* Check if key is closing. */ if (IS_CLOSING(key)) return PJ_ECANCELLED; read_op = (struct read_operation*)op_key; read_op->op = PJ_IOQUEUE_OP_NONE; /* Try to see if there's data immediately available. */ if ((flags & PJ_IOQUEUE_ALWAYS_ASYNC) == 0) { pj_status_t status; pj_ssize_t size; size = *length; status = pj_sock_recvfrom(key->fd, buffer, &size, flags, addr, addrlen); if (status == PJ_SUCCESS) { /* Yes! Data is available! */ *length = size; return PJ_SUCCESS; } else { /* If error is not EWOULDBLOCK (or EAGAIN on Linux), report * the error to caller. */ if (status != PJ_STATUS_FROM_OS(PJ_BLOCKING_ERROR_VAL)) return status; } } flags &= ~(PJ_IOQUEUE_ALWAYS_ASYNC); /* * No data is immediately available. * Must schedule asynchronous operation to the ioqueue. */ read_op->op = PJ_IOQUEUE_OP_RECV_FROM; read_op->buf = buffer; read_op->size = *length; read_op->flags = flags; read_op->rmt_addr = addr; read_op->rmt_addrlen = addrlen; pj_mutex_lock(key->mutex); pj_list_insert_before(&key->read_list, read_op); ioqueue_add_to_set(key->ioqueue, key, READABLE_EVENT); pj_mutex_unlock(key->mutex); return PJ_EPENDING; }
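/* A minimal sketch (illustrative, not from the original source) of the usual
 * completion-callback pattern for pj_ioqueue_recvfrom(): process the data,
 * then immediately re-arm the read. The buffers are static here only to keep
 * the example short; in real code they must stay valid while the operation
 * is pending.
 */
static char            rx_buf[1500];
static pj_sockaddr_in  rx_src;
static int             rx_src_len;

static void on_read_complete(pj_ioqueue_key_t *key,
                             pj_ioqueue_op_key_t *op_key,
                             pj_ssize_t bytes_read)
{
    pj_status_t status;

    do {
        pj_ssize_t len;

        if (bytes_read > 0) {
            /* ... consume rx_buf[0..bytes_read) ... */
        }

        /* Re-arm the read. PJ_SUCCESS means data was already available and
         * we loop to consume it; PJ_EPENDING means this callback will be
         * invoked again when data arrives.
         */
        len = sizeof(rx_buf);
        rx_src_len = sizeof(rx_src);
        status = pj_ioqueue_recvfrom(key, op_key, rx_buf, &len, 0,
                                     &rx_src, &rx_src_len);
        bytes_read = len;
    } while (status == PJ_SUCCESS);
}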
/* * Initiate overlapped accept() operation. */ PJ_DEF(pj_status_t) pj_ioqueue_accept( pj_ioqueue_key_t *key, pj_ioqueue_op_key_t *op_key, pj_sock_t *new_sock, pj_sockaddr_t *local, pj_sockaddr_t *remote, int *addrlen) { struct accept_operation *accept_op; pj_status_t status; /* check parameters. All must be specified! */ PJ_ASSERT_RETURN(key && op_key && new_sock, PJ_EINVAL); /* Check if key is closing. */ if (IS_CLOSING(key)) return PJ_ECANCELLED; accept_op = (struct accept_operation*)op_key; accept_op->op = PJ_IOQUEUE_OP_NONE; /* Fast track: * See if there's new connection available immediately. */ if (pj_list_empty(&key->accept_list)) { status = pj_sock_accept(key->fd, new_sock, remote, addrlen); if (status == PJ_SUCCESS) { /* Yes! New connection is available! */ if (local && addrlen) { status = pj_sock_getsockname(*new_sock, local, addrlen); if (status != PJ_SUCCESS) { pj_sock_close(*new_sock); *new_sock = PJ_INVALID_SOCKET; return status; } } return PJ_SUCCESS; } else { /* If error is not EWOULDBLOCK (or EAGAIN on Linux), report * the error to caller. */ if (status != PJ_STATUS_FROM_OS(PJ_BLOCKING_ERROR_VAL)) { return status; } } } /* * No connection is available immediately. * Schedule accept() operation to be completed when there is incoming * connection available. */ accept_op->op = PJ_IOQUEUE_OP_ACCEPT; accept_op->accept_fd = new_sock; accept_op->rmt_addr = remote; accept_op->addrlen= addrlen; accept_op->local_addr = local; pj_mutex_lock(key->mutex); pj_list_insert_before(&key->accept_list, accept_op); ioqueue_add_to_set(key->ioqueue, key, READABLE_EVENT); pj_mutex_unlock(key->mutex); return PJ_EPENDING; }
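/* A hedged usage sketch (not from the original source) for the asynchronous
 * accept above. lis_key is assumed to be a registered listening TCP socket;
 * the op key, new_sock, remote address and addrlen storage must remain valid
 * until the operation completes, which is why they are not locals here.
 */
static pj_ioqueue_op_key_t accept_op;
static pj_sock_t           new_sock = PJ_INVALID_SOCKET;
static pj_sockaddr_in      remote_addr;
static int                 remote_addrlen = sizeof(remote_addr);

static pj_status_t start_accept(pj_ioqueue_key_t *lis_key)
{
    pj_status_t status;

    pj_ioqueue_op_key_init(&accept_op, sizeof(accept_op));
    status = pj_ioqueue_accept(lis_key, &accept_op, &new_sock,
                               NULL, &remote_addr, &remote_addrlen);
    if (status == PJ_SUCCESS) {
        /* A connection was already pending; new_sock is valid now. */
    } else if (status == PJ_EPENDING) {
        /* on_accept_complete() will be called when a connection arrives. */
    }
    return status;
}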
/* * Perform unregistration test. * * This will create ioqueue and register a server socket. Depending * on the test method, either the callback or the main thread will * unregister and destroy the server socket after some period of time. */ static int perform_unreg_test(pj_ioqueue_t *ioqueue, pj_pool_t *test_pool, const char *title, pj_bool_t other_socket) { enum { WORKER_CNT = 1, MSEC = 500, QUIT_MSEC = 500 }; int i; pj_thread_t *thread[WORKER_CNT]; struct sock_data osd; pj_ioqueue_callback callback; pj_time_val end_time; pj_status_t status; /* Sometimes its important to have other sockets registered to * the ioqueue, because when no sockets are registered, the ioqueue * will return from the poll early. */ if (other_socket) { status = app_socket(pj_AF_INET(), pj_SOCK_DGRAM(), 0, 56127, &osd.sock); if (status != PJ_SUCCESS) { app_perror("Error creating other socket", status); return -12; } pj_bzero(&callback, sizeof(callback)); status = pj_ioqueue_register_sock(test_pool, ioqueue, osd.sock, NULL, &callback, &osd.key); if (status != PJ_SUCCESS) { app_perror("Error registering other socket", status); return -13; } } else { osd.key = NULL; osd.sock = PJ_INVALID_SOCKET; } /* Init both time duration of testing */ thread_quitting = 0; pj_gettimeofday(&time_to_unregister); time_to_unregister.msec += MSEC; pj_time_val_normalize(&time_to_unregister); end_time = time_to_unregister; end_time.msec += QUIT_MSEC; pj_time_val_normalize(&end_time); /* Create polling thread */ for (i=0; i<WORKER_CNT; ++i) { status = pj_thread_create(test_pool, "unregtest", &worker_thread, ioqueue, 0, 0, &thread[i]); if (status != PJ_SUCCESS) { app_perror("Error creating thread", status); return -20; } } /* Create pair of client/server sockets */ status = app_socketpair(pj_AF_INET(), pj_SOCK_DGRAM(), 0, &sock_data.sock, &sock_data.csock); if (status != PJ_SUCCESS) { app_perror("app_socketpair error", status); return -30; } /* Initialize test data */ sock_data.pool = pj_pool_create(mem, "sd", 1000, 1000, NULL); sock_data.buffer = (char*) pj_pool_alloc(sock_data.pool, 128); sock_data.bufsize = 128; sock_data.op_key = (pj_ioqueue_op_key_t*) pj_pool_alloc(sock_data.pool, sizeof(*sock_data.op_key)); sock_data.received = 0; sock_data.unregistered = 0; pj_ioqueue_op_key_init(sock_data.op_key, sizeof(*sock_data.op_key)); status = pj_mutex_create_simple(sock_data.pool, "sd", &sock_data.mutex); if (status != PJ_SUCCESS) { app_perror("create_mutex() error", status); return -35; } /* Register socket to ioqueue */ pj_bzero(&callback, sizeof(callback)); callback.on_read_complete = &on_read_complete; status = pj_ioqueue_register_sock(sock_data.pool, ioqueue, sock_data.sock, NULL, &callback, &sock_data.key); if (status != PJ_SUCCESS) { app_perror("pj_ioqueue_register error", status); return -40; } /* Bootstrap the first send/receive */ on_read_complete(sock_data.key, sock_data.op_key, 0); /* Loop until test time ends */ for (;;) { pj_time_val now, timeout; pj_gettimeofday(&now); if (test_method == UNREGISTER_IN_APP && PJ_TIME_VAL_GTE(now, time_to_unregister) && sock_data.pool) { pj_mutex_lock(sock_data.mutex); sock_data.unregistered = 1; pj_ioqueue_unregister(sock_data.key); pj_mutex_unlock(sock_data.mutex); pj_mutex_destroy(sock_data.mutex); pj_pool_release(sock_data.pool); sock_data.pool = NULL; } if (PJ_TIME_VAL_GT(now, end_time) && sock_data.unregistered) break; timeout.sec = 0; timeout.msec = 10; pj_ioqueue_poll(ioqueue, &timeout); //pj_thread_sleep(1); } thread_quitting = 1; for (i=0; i<WORKER_CNT; ++i) { pj_thread_join(thread[i]); 
pj_thread_destroy(thread[i]); } if (other_socket) { pj_ioqueue_unregister(osd.key); } pj_sock_close(sock_data.csock); PJ_LOG(3,(THIS_FILE, "....%s: done (%d KB/s)", title, sock_data.received * 1000 / MSEC / 1000)); return 0; }
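/* One plausible shape (an assumption, not copied from the test source) of
 * the worker_thread() used above: it simply polls the ioqueue with a short
 * timeout until the test raises thread_quitting.
 */
static int worker_thread(void *arg)
{
    pj_ioqueue_t *ioqueue = (pj_ioqueue_t*)arg;

    while (!thread_quitting) {
        pj_time_val timeout = { 0, 10 };
        pj_ioqueue_poll(ioqueue, &timeout);
    }
    return 0;
}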
/* * ioqueue_dispatch_event() * * Report occurence of an event in the key to be processed by the * framework. */ void ioqueue_dispatch_write_event(pj_ioqueue_t *ioqueue, pj_ioqueue_key_t *h) { /* Lock the key. */ pj_mutex_lock(h->mutex); if (IS_CLOSING(h)) { pj_mutex_unlock(h->mutex); return; } #if defined(PJ_HAS_TCP) && PJ_HAS_TCP!=0 if (h->connecting) { /* Completion of connect() operation */ pj_ssize_t bytes_transfered; pj_bool_t has_lock; /* Clear operation. */ h->connecting = 0; ioqueue_remove_from_set(ioqueue, h, WRITEABLE_EVENT); ioqueue_remove_from_set(ioqueue, h, EXCEPTION_EVENT); #if (defined(PJ_HAS_SO_ERROR) && PJ_HAS_SO_ERROR!=0) /* from connect(2): * On Linux, use getsockopt to read the SO_ERROR option at * level SOL_SOCKET to determine whether connect() completed * successfully (if SO_ERROR is zero). */ { int value; int vallen = sizeof(value); int gs_rc = pj_sock_getsockopt(h->fd, SOL_SOCKET, SO_ERROR, &value, &vallen); if (gs_rc != 0) { /* Argh!! What to do now??? * Just indicate that the socket is connected. The * application will get error as soon as it tries to use * the socket to send/receive. */ bytes_transfered = 0; } else { bytes_transfered = value; } } #elif defined(PJ_WIN32) && PJ_WIN32!=0 bytes_transfered = 0; /* success */ #else /* Excellent information in D.J. Bernstein page: * http://cr.yp.to/docs/connect.html * * Seems like the most portable way of detecting connect() * failure is to call getpeername(). If socket is connected, * getpeername() will return 0. If the socket is not connected, * it will return ENOTCONN, and read(fd, &ch, 1) will produce * the right errno through error slippage. This is a combination * of suggestions from Douglas C. Schmidt and Ken Keys. */ { int gp_rc; struct sockaddr_in addr; socklen_t addrlen = sizeof(addr); gp_rc = getpeername(h->fd, (struct sockaddr*)&addr, &addrlen); bytes_transfered = (gp_rc < 0) ? gp_rc : -gp_rc; } #endif /* Unlock; from this point we don't need to hold key's mutex * (unless concurrency is disabled, which in this case we should * hold the mutex while calling the callback) */ if (h->allow_concurrent) { /* concurrency may be changed while we're in the callback, so * save it to a flag. */ has_lock = PJ_FALSE; pj_mutex_unlock(h->mutex); } else { has_lock = PJ_TRUE; } /* Call callback. */ if (h->cb.on_connect_complete && !IS_CLOSING(h)) (*h->cb.on_connect_complete)(h, bytes_transfered); /* Unlock if we still hold the lock */ if (has_lock) { pj_mutex_unlock(h->mutex); } /* Done. */ } else #endif /* PJ_HAS_TCP */ if (key_has_pending_write(h)) { /* Socket is writable. */ struct write_operation *write_op; pj_ssize_t sent; pj_status_t send_rc; /* Get the first in the queue. */ write_op = h->write_list.next; /* For datagrams, we can remove the write_op from the list * so that send() can work in parallel. */ if (h->fd_type == pj_SOCK_DGRAM()) { pj_list_erase(write_op); if (pj_list_empty(&h->write_list)) ioqueue_remove_from_set(ioqueue, h, WRITEABLE_EVENT); } /* Send the data. * Unfortunately we must do this while holding key's mutex, thus * preventing parallel write on a single key.. :-(( */ sent = write_op->size - write_op->written; if (write_op->op == PJ_IOQUEUE_OP_SEND) { send_rc = pj_sock_send(h->fd, write_op->buf+write_op->written, &sent, write_op->flags); /* Can't do this. We only clear "op" after we're finished sending * the whole buffer. 
*/ //write_op->op = 0; } else if (write_op->op == PJ_IOQUEUE_OP_SEND_TO) { send_rc = pj_sock_sendto(h->fd, write_op->buf+write_op->written, &sent, write_op->flags, &write_op->rmt_addr, write_op->rmt_addrlen); /* Can't do this. We only clear "op" after we're finished sending * the whole buffer. */ //write_op->op = 0; } else { pj_assert(!"Invalid operation type!"); write_op->op = PJ_IOQUEUE_OP_NONE; send_rc = PJ_EBUG; } if (send_rc == PJ_SUCCESS) { write_op->written += sent; } else { pj_assert(send_rc > 0); write_op->written = -send_rc; } /* Are we finished with this buffer? */ if (send_rc!=PJ_SUCCESS || write_op->written == (pj_ssize_t)write_op->size || h->fd_type == pj_SOCK_DGRAM()) { pj_bool_t has_lock; write_op->op = PJ_IOQUEUE_OP_NONE; if (h->fd_type != pj_SOCK_DGRAM()) { /* Write completion of the whole stream. */ pj_list_erase(write_op); /* Clear operation if there's no more data to send. */ if (pj_list_empty(&h->write_list)) ioqueue_remove_from_set(ioqueue, h, WRITEABLE_EVENT); } /* Unlock; from this point we don't need to hold key's mutex * (unless concurrency is disabled, which in this case we should * hold the mutex while calling the callback) */ if (h->allow_concurrent) { /* concurrency may be changed while we're in the callback, so * save it to a flag. */ has_lock = PJ_FALSE; pj_mutex_unlock(h->mutex); } else { has_lock = PJ_TRUE; } /* Call callback. */ if (h->cb.on_write_complete && !IS_CLOSING(h)) { (*h->cb.on_write_complete)(h, (pj_ioqueue_op_key_t*)write_op, write_op->written); } if (has_lock) { pj_mutex_unlock(h->mutex); } } else { pj_mutex_unlock(h->mutex); } /* Done. */ } else { /* * This is normal; execution may fall here when multiple threads * are signalled for the same event, but only one thread eventually * able to process the event. */ pj_mutex_unlock(h->mutex); } }
static pj_status_t open_openh264_codec(openh264_private *ff, pj_mutex_t *ff_mutex) { pjmedia_video_format_detail *vfd; pj_bool_t enc_opened = PJ_FALSE, dec_opened = PJ_FALSE; pj_status_t status; vfd = pjmedia_format_get_video_format_detail(&ff->param.enc_fmt, PJ_TRUE); /* Override generic params or apply specific params before opening * the codec. */ if (ff->desc->preopen) { status = (*ff->desc->preopen)(ff); if (status != PJ_SUCCESS) goto on_error; } /* Open encoder */ if (ff->param.dir & PJMEDIA_DIR_ENCODING) { int err; SEncParamExt *param = &ff->enc_param; const openh264_codec_desc *desc = &ff->desc[0]; bool disable = 0; int iIndexLayer = 0; SSourcePicture *srcPic; pj_mutex_lock(ff_mutex); memset(param, 0x00, sizeof(SEncParamExt)); CreateSVCEncoder(&ff->enc); /* Test for temporal, spatial, SNR scalability */ param->fMaxFrameRate = (float)vfd->fps.num; // input frame rate param->iPicWidth = vfd->size.w; // width of picture in samples param->iPicHeight = vfd->size.h; // height of picture in samples param->iTargetBitrate = desc->avg_bps; // target bitrate desired param->bEnableRc = PJ_TRUE; // rc mode control param->iTemporalLayerNum = 3; // layer number at temporal level param->iSpatialLayerNum = 1; // layer number at spatial level param->bEnableDenoise = PJ_TRUE; // denoise control param->bEnableBackgroundDetection = PJ_TRUE; // background detection control param->bEnableAdaptiveQuant = PJ_TRUE; // adaptive quantization control param->bEnableFrameSkip = PJ_TRUE; // frame skipping param->bEnableLongTermReference = PJ_FALSE; // long term reference control param->bEnableFrameCroppingFlag = PJ_FALSE; param->iLoopFilterDisableIdc = 0; param->iInputCsp = videoFormatI420; // color space of input sequence param->uiIntraPeriod = 300; // period of Intra frame param->bEnableSpsPpsIdAddition = 0; param->bPrefixNalAddingCtrl = 0; param->sSpatialLayers[iIndexLayer].iVideoWidth = vfd->size.w; param->sSpatialLayers[iIndexLayer].iVideoHeight = vfd->size.h; param->sSpatialLayers[iIndexLayer].fFrameRate = (float)vfd->fps.num; param->sSpatialLayers[iIndexLayer].iSpatialBitrate = desc->avg_bps; // param->sSpatialLayers[iIndexLayer].iDLayerQp = 50; param->sSpatialLayers[iIndexLayer].uiProfileIdc = 66; param->sSpatialLayers[iIndexLayer].sSliceCfg.uiSliceMode = 4; param->sSpatialLayers[iIndexLayer].sSliceCfg.sSliceArgument.uiSliceSizeConstraint = PJMEDIA_MAX_VID_PAYLOAD_SIZE; err = callWelsEncoderFn(ff->enc)->InitializeExt(ff->enc, param); if (err == cmResultSuccess) { callWelsEncoderFn(ff->enc)->SetOption(ff->enc, ENCODER_OPTION_ENABLE_SSEI, &disable); enc_opened = PJ_TRUE; } srcPic = malloc(sizeof(SSourcePicture)); memset(srcPic, 0x00, sizeof(SSourcePicture)); srcPic->iColorFormat = param->iInputCsp; srcPic->iPicWidth = param->iPicWidth; srcPic->iPicHeight = param->iPicHeight; srcPic->iStride[0] = param->iPicWidth; srcPic->iStride[1] = param->iPicWidth / 2; srcPic->iStride[2] = param->iPicWidth / 2; ff->srcPic = srcPic; pj_mutex_unlock(ff_mutex); } /* Open decoder */ if (ff->param.dir & PJMEDIA_DIR_DECODING) { SDecodingParam sDecParam = {0}; pj_mutex_lock(ff_mutex); CreateDecoder(&ff->dec); sDecParam.iOutputColorFormat = videoFormatI420; sDecParam.uiTargetDqLayer = (unsigned char)-1; sDecParam.uiEcActiveFlag = 1; sDecParam.sVideoProperty.eVideoBsType = VIDEO_BITSTREAM_DEFAULT; callWelsDecoderFn(ff->dec)->Initialize(ff->dec, &sDecParam); pj_mutex_unlock(ff_mutex); dec_opened = PJ_TRUE; } /* Let the codec apply specific params after the codec opened */ if (ff->desc->postopen) { status = 
(*ff->desc->postopen)(ff); if (status != PJ_SUCCESS) goto on_error; } return PJ_SUCCESS; on_error: return status; }
/* * Get default codec parameter. */ PJ_DEF(pj_status_t) pjmedia_codec_mgr_get_default_param( pjmedia_codec_mgr *mgr, const pjmedia_codec_info *info, pjmedia_codec_param *param ) { pjmedia_codec_factory *factory; pj_status_t status; pjmedia_codec_id codec_id; struct pjmedia_codec_desc *codec_desc = NULL; unsigned i; PJ_ASSERT_RETURN(mgr && info && param, PJ_EINVAL); if (!pjmedia_codec_info_to_id(info, (char*)&codec_id, sizeof(codec_id))) return PJ_EINVAL; pj_mutex_lock(mgr->mutex); /* First, lookup default param in codec desc */ for (i=0; i < mgr->codec_cnt; ++i) { if (pj_ansi_stricmp(codec_id, mgr->codec_desc[i].id) == 0) { codec_desc = &mgr->codec_desc[i]; break; } } /* If we found the codec and its default param is set, return it */ if (codec_desc && codec_desc->param) { pj_assert(codec_desc->param->param); pj_memcpy(param, codec_desc->param->param, sizeof(pjmedia_codec_param)); pj_mutex_unlock(mgr->mutex); return PJ_SUCCESS; } /* Otherwise query the default param from codec factory */ factory = mgr->factory_list.next; while (factory != &mgr->factory_list) { if ( (*factory->op->test_alloc)(factory, info) == PJ_SUCCESS ) { status = (*factory->op->default_attr)(factory, info, param); if (status == PJ_SUCCESS) { /* Check for invalid max_bps. */ if (param->info.max_bps < param->info.avg_bps) param->info.max_bps = param->info.avg_bps; pj_mutex_unlock(mgr->mutex); return PJ_SUCCESS; } } factory = factory->next; } pj_mutex_unlock(mgr->mutex); return PJMEDIA_CODEC_EUNSUP; }
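/* A hedged usage sketch (not from the original source): look up a codec by
 * its id string and query its default parameters. The codec id value used
 * here is an illustrative assumption.
 */
static pj_status_t query_default_param(pjmedia_codec_mgr *mgr,
                                       pjmedia_codec_param *param)
{
    char id_buf[] = "pcmu/8000";
    const pj_str_t codec_id = pj_str(id_buf);
    const pjmedia_codec_info *info;
    unsigned count = 1;
    pj_status_t status;

    status = pjmedia_codec_mgr_find_codecs_by_id(mgr, &codec_id, &count,
                                                 &info, NULL);
    if (status != PJ_SUCCESS || count < 1)
        return PJMEDIA_CODEC_EUNSUP;

    return pjmedia_codec_mgr_get_default_param(mgr, info, param);
}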
int CChannel::sendto(const sockaddr* addr, CPacket& packet) const { // convert control information into network order if (packet.getFlag()) { for (int i = 0, n = packet.getLength() / 4; i < n; ++ i) *((uint32_t *)packet.m_pcData + i) = htonl(*((uint32_t *)packet.m_pcData + i)); } uint32_t* p = packet.m_nHeader; for (int j = 0; j < 4; ++ j) { *p = htonl(*p); ++ p; } #ifdef DEBUGP //dump ctrl packet printf("\nSend Header:\n"); dumpHex((char *)packet.m_PacketVector[0].iov_base, packet.m_PacketVector[0].iov_len); char *bb = (char *)packet.m_PacketVector[0].iov_base; if(bb[0]&0x80) { printf("Data:\n"); dumpHex((char *)packet.m_PacketVector[1].iov_base, packet.m_PacketVector[1].iov_len); printf("================\n"); } #endif int res = -1; unsigned size; unsigned len; natnl_hdr hdr = {0xff, 0x00, 0x0000}; int is_tnl_data = 0; pj_thread_desc desc; pj_thread_t *thread = 0; if(m_iSocket == -1) { pjsua_call *call = (pjsua_call *)m_call; if(call == NULL) return -1; // DEAN, prevent assert fail while garbage collector remove UDT socket on multiple instance. if (!pj_thread_is_registered(call->inst_id)) { int status = pj_thread_register(call->inst_id, "CChannel::sendto", desc, &thread ); if (status != PJ_SUCCESS) return -1; } pj_mutex_lock(call->tnl_stream_lock2); natnl_stream *stream = (natnl_stream *)call->tnl_stream; if(stream == NULL) { pj_mutex_unlock(call->tnl_stream_lock2); return -1; } size = CPacket::m_iPktHdrSize + packet.getLength() + sizeof(natnl_hdr); len = (CPacket::m_iPktHdrSize + packet.getLength()); hdr.length = htons(len); memcpy((char *)&m_pktBuffer[sizeof(natnl_hdr)], packet.m_PacketVector[0].iov_base, packet.m_PacketVector[0].iov_len); memcpy((char *)&m_pktBuffer[packet.m_PacketVector[0].iov_len+sizeof(natnl_hdr)], packet.m_PacketVector[1].iov_base, packet.m_PacketVector[1].iov_len); memcpy((char *)&m_pktBuffer[0], &hdr, sizeof(natnl_hdr)); resend: // DEAN, check if this is tunnel data. If true, update last_data time. is_tnl_data = pjmedia_natnl_udt_packet_is_tnl_data(&m_pktBuffer[0], size); pj_assert(size < sizeof(m_pktBuffer)); ((pj_uint8_t*)m_pktBuffer)[size] = 0; // tunnel data flag off if (is_tnl_data) { pj_get_timestamp(&stream->last_data); // DEAN save current time ((pj_uint8_t*)m_pktBuffer)[size] = 1; // tunnel data flag on } res = pjmedia_transport_send_rtp(stream->med_tp, m_pktBuffer, size); // +Roger modified - stream pointer to med_tp #if 0 // No need to resend it, because UDT will handle this. if(res == 70011) { //EAGAIN m_pTimer->sleepto(50000); //sleep for 50 us goto resend; } #endif pj_mutex_unlock(call->tnl_stream_lock2); } res = (0 == res) ? size : -1; // convert back into local host order //for (int k = 0; k < 4; ++ k) // packet.m_nHeader[k] = ntohl(packet.m_nHeader[k]); p = packet.m_nHeader; for (int k = 0; k < 4; ++ k) { *p = ntohl(*p); ++ p; } if (packet.getFlag()) { for (int l = 0, n = packet.getLength() / 4; l < n; ++ l) *((uint32_t *)packet.m_pcData + l) = ntohl(*((uint32_t *)packet.m_pcData + l)); } return res; }
int CChannel::recvfrom(sockaddr* addr, CPacket& packet) const { int res = -1; recv_buff *rb = NULL; pj_thread_desc desc; pj_thread_t *thread = 0; if (m_iSocket == -1) { pjsua_call *call = (pjsua_call *)m_call; if(call == NULL) return -1; if(call->tnl_stream==NULL) return -1; // DEAN, prevent assert fail while garbage collector remove UDT socket on multiple instance. if (!pj_thread_is_registered(call->inst_id)) { int status = pj_thread_register(call->inst_id, "CChannel::recvfrom", desc, &thread ); if (status != PJ_SUCCESS) return -1; } pj_mutex_lock(call->tnl_stream_lock3); natnl_stream *stream = (natnl_stream *)call->tnl_stream; //get data from rBuff if (stream == NULL) { pj_mutex_unlock(call->tnl_stream_lock3); return -1; } // charles CHARLES // DEAN commeted, for using pj_sem_try_wait2 //pj_mutex_unlock(call->tnl_stream_lock3); //pj_sem_wait(stream->rbuff_sem); pj_sem_trywait2(stream->rbuff_sem); //pj_mutex_lock(call->tnl_stream_lock3); pj_mutex_lock(stream->rbuff_mutex); if (!pj_list_empty(&stream->rbuff)) { rb = stream->rbuff.next; stream->rbuff_cnt--; //PJ_LOG(4, ("channel.cpp", "rbuff_cnt=%d", stream->rbuff_cnt)); pj_list_erase(rb); /*if (rb->len > 0 && ((pj_uint32_t *)rb->buff)[0] == NO_CTL_SESS_MGR_HEADER_MAGIC) { // check the magic char *data = (char *)&rb->buff[sizeof(NO_CTL_SESS_MGR_HEADER_MAGIC)]; int len = rb->len - sizeof(NO_CTL_SESS_MGR_HEADER_MAGIC); natnl_handle_recv_msg(call->index, call->tnl_stream->med_tp, data, len); } else */if (!check_packet_integrity(rb)) { int ds = UMIN(packet.m_PacketVector[1].iov_len, rb->len - sizeof(natnl_hdr) - CPacket::m_iPktHdrSize); memcpy(packet.m_PacketVector[0].iov_base, &rb->buff[sizeof(natnl_hdr)], packet.m_PacketVector[0].iov_len); memcpy(packet.m_PacketVector[1].iov_base, &rb->buff[packet.m_PacketVector[0].iov_len+sizeof(natnl_hdr)], ds); res = rb->len - sizeof(natnl_hdr); } } pj_mutex_unlock(stream->rbuff_mutex); if (rb != NULL) { #if 1 //move rb to gcbuff pj_mutex_lock(stream->gcbuff_mutex); pj_list_push_back(&stream->gcbuff, rb); pj_mutex_unlock(stream->gcbuff_mutex); #else free(rb); rb = NULL; #endif } pj_mutex_unlock(call->tnl_stream_lock3); } if (res <= 0) { packet.setLength(-1); return -1; } packet.setLength(res - CPacket::m_iPktHdrSize); #ifdef DEBUGP printf("\nRecv Header:\n"); dumpHex((char *)packet.m_PacketVector[0].iov_base, packet.m_PacketVector[0].iov_len); char *bb = (char *)packet.m_PacketVector[0].iov_base; if(bb[0]&0x80) { printf("Data:\n"); dumpHex((char *)packet.m_PacketVector[1].iov_base, packet.m_PacketVector[1].iov_len); printf("================\n"); } #endif // convert back into local host order //for (int i = 0; i < 4; ++ i) // packet.m_nHeader[i] = ntohl(packet.m_nHeader[i]); uint32_t* p = packet.m_nHeader; for (int i = 0; i < 4; ++ i) { *p = ntohl(*p); ++ p; } if (packet.getFlag()) { for (int j = 0, n = packet.getLength() / 4; j < n; ++ j) *((uint32_t *)packet.m_pcData + j) = ntohl(*((uint32_t *)packet.m_pcData + j)); } return packet.getLength(); }
/* * Callback upon request completion. */ static void on_request_complete(pj_stun_session *stun_sess, pj_status_t status, void *token, pj_stun_tx_data *tdata, const pj_stun_msg *response, const pj_sockaddr_t *src_addr, unsigned src_addr_len) { nat_detect_session *sess; pj_stun_sockaddr_attr *mattr = NULL; pj_stun_changed_addr_attr *ca = NULL; pj_uint32_t *tsx_id; int cmp; unsigned test_id; PJ_UNUSED_ARG(token); PJ_UNUSED_ARG(tdata); PJ_UNUSED_ARG(src_addr); PJ_UNUSED_ARG(src_addr_len); sess = (nat_detect_session*) pj_stun_session_get_user_data(stun_sess); pj_mutex_lock(sess->mutex); /* Find errors in the response */ if (status == PJ_SUCCESS) { /* Check error message */ if (PJ_STUN_IS_ERROR_RESPONSE(response->hdr.type)) { pj_stun_errcode_attr *eattr; int err_code; eattr = (pj_stun_errcode_attr*) pj_stun_msg_find_attr(response, PJ_STUN_ATTR_ERROR_CODE, 0); if (eattr != NULL) err_code = eattr->err_code; else err_code = PJ_STUN_SC_SERVER_ERROR; status = PJ_STATUS_FROM_STUN_CODE(err_code); } else { /* Get MAPPED-ADDRESS or XOR-MAPPED-ADDRESS */ mattr = (pj_stun_sockaddr_attr*) pj_stun_msg_find_attr(response, PJ_STUN_ATTR_XOR_MAPPED_ADDR, 0); if (mattr == NULL) { mattr = (pj_stun_sockaddr_attr*) pj_stun_msg_find_attr(response, PJ_STUN_ATTR_MAPPED_ADDR, 0); } if (mattr == NULL) { status = PJNATH_ESTUNNOMAPPEDADDR; } /* Get CHANGED-ADDRESS attribute */ ca = (pj_stun_changed_addr_attr*) pj_stun_msg_find_attr(response, PJ_STUN_ATTR_CHANGED_ADDR, 0); if (ca == NULL) { status = PJ_STATUS_FROM_STUN_CODE(PJ_STUN_SC_SERVER_ERROR); } } } /* Save the result */ tsx_id = (pj_uint32_t*) tdata->msg->hdr.tsx_id; test_id = tsx_id[2]; if (test_id >= ST_MAX) { PJ_LOG(4,(sess->pool->obj_name, "Invalid transaction ID %u in response", test_id)); end_session(sess, PJ_STATUS_FROM_STUN_CODE(PJ_STUN_SC_SERVER_ERROR), PJ_STUN_NAT_TYPE_ERR_UNKNOWN); goto on_return; } PJ_LOG(5,(sess->pool->obj_name, "Completed %s, status=%d", test_names[test_id], status)); sess->result[test_id].complete = PJ_TRUE; sess->result[test_id].status = status; if (status == PJ_SUCCESS) { pj_memcpy(&sess->result[test_id].ma, &mattr->sockaddr.ipv4, sizeof(pj_sockaddr_in)); pj_memcpy(&sess->result[test_id].ca, &ca->sockaddr.ipv4, sizeof(pj_sockaddr_in)); } /* Send Test 1B only when Test 2 completes. Must not send Test 1B * before Test 2 completes to avoid creating mapping on the NAT. */ if (!sess->result[ST_TEST_1B].executed && sess->result[ST_TEST_2].complete && sess->result[ST_TEST_2].status != PJ_SUCCESS && sess->result[ST_TEST_1].complete && sess->result[ST_TEST_1].status == PJ_SUCCESS) { cmp = pj_memcmp(&sess->local_addr, &sess->result[ST_TEST_1].ma, sizeof(pj_sockaddr_in)); if (cmp != 0) send_test(sess, ST_TEST_1B, &sess->result[ST_TEST_1].ca, 0); } if (test_completed(sess)<3 || test_completed(sess)!=test_executed(sess)) goto on_return; /* Handle the test result according to RFC 3489 page 22: +--------+ | Test | | 1 | +--------+ | | V /\ /\ N / \ Y / \ Y +--------+ UDP <-------/Resp\--------->/ IP \------------->| Test | Blocked \ ? / \Same/ | 2 | \ / \? / +--------+ \/ \/ | | N | | V V /\ +--------+ Sym. N / \ | Test | UDP <---/Resp\ | 2 | Firewall \ ? / +--------+ \ / | \/ V |Y /\ /\ | Symmetric N / \ +--------+ N / \ V NAT <--- / IP \<-----| Test |<--- /Resp\ Open \Same/ | 1B | \ ? / Internet \? / +--------+ \ / \/ \/ | |Y | | | V | Full | Cone V /\ +--------+ / \ Y | Test |------>/Resp\---->Restricted | 3 | \ ? 
/ +--------+ \ / \/ |N | Port +------>Restricted Figure 2: Flow for type discovery process */ switch (sess->result[ST_TEST_1].status) { case PJNATH_ESTUNTIMEDOUT: /* * Test 1 has timed-out. Conclude with NAT_TYPE_BLOCKED. */ end_session(sess, PJ_SUCCESS, PJ_STUN_NAT_TYPE_BLOCKED); break; case PJ_SUCCESS: /* * Test 1 is successful. Further tests are needed to detect * NAT type. Compare the MAPPED-ADDRESS with the local address. */ cmp = pj_memcmp(&sess->local_addr, &sess->result[ST_TEST_1].ma, sizeof(pj_sockaddr_in)); if (cmp==0) { /* * MAPPED-ADDRESS and local address is equal. Need one more * test to determine NAT type. */ switch (sess->result[ST_TEST_2].status) { case PJ_SUCCESS: /* * Test 2 is also successful. We're in the open. */ end_session(sess, PJ_SUCCESS, PJ_STUN_NAT_TYPE_OPEN); break; case PJNATH_ESTUNTIMEDOUT: /* * Test 2 has timed out. We're behind somekind of UDP * firewall. */ end_session(sess, PJ_SUCCESS, PJ_STUN_NAT_TYPE_SYMMETRIC_UDP); break; default: /* * We've got other error with Test 2. */ end_session(sess, sess->result[ST_TEST_2].status, PJ_STUN_NAT_TYPE_ERR_UNKNOWN); break; } } else { /* * MAPPED-ADDRESS is different than local address. * We're behind NAT. */ switch (sess->result[ST_TEST_2].status) { case PJ_SUCCESS: /* * Test 2 is successful. We're behind a full-cone NAT. */ end_session(sess, PJ_SUCCESS, PJ_STUN_NAT_TYPE_FULL_CONE); break; case PJNATH_ESTUNTIMEDOUT: /* * Test 2 has timed-out Check result of test 1B.. */ switch (sess->result[ST_TEST_1B].status) { case PJ_SUCCESS: /* * Compare the MAPPED-ADDRESS of test 1B with the * MAPPED-ADDRESS returned in test 1.. */ cmp = pj_memcmp(&sess->result[ST_TEST_1].ma, &sess->result[ST_TEST_1B].ma, sizeof(pj_sockaddr_in)); if (cmp != 0) { /* * MAPPED-ADDRESS is different, we're behind a * symmetric NAT. */ end_session(sess, PJ_SUCCESS, PJ_STUN_NAT_TYPE_SYMMETRIC); } else { /* * MAPPED-ADDRESS is equal. We're behind a restricted * or port-restricted NAT, depending on the result of * test 3. */ switch (sess->result[ST_TEST_3].status) { case PJ_SUCCESS: /* * Test 3 is successful, we're behind a restricted * NAT. */ end_session(sess, PJ_SUCCESS, PJ_STUN_NAT_TYPE_RESTRICTED); break; case PJNATH_ESTUNTIMEDOUT: /* * Test 3 failed, we're behind a port restricted * NAT. */ end_session(sess, PJ_SUCCESS, PJ_STUN_NAT_TYPE_PORT_RESTRICTED); break; default: /* * Got other error with test 3. */ end_session(sess, sess->result[ST_TEST_3].status, PJ_STUN_NAT_TYPE_ERR_UNKNOWN); break; } } break; case PJNATH_ESTUNTIMEDOUT: /* * Strangely test 1B has failed. Maybe connectivity was * lost? Or perhaps port 3489 (the usual port number in * CHANGED-ADDRESS) is blocked? */ switch (sess->result[ST_TEST_3].status) { case PJ_SUCCESS: /* Although test 1B failed, test 3 was successful. * It could be that port 3489 is blocked, while the * NAT itself looks to be a Restricted one. */ end_session(sess, PJ_SUCCESS, PJ_STUN_NAT_TYPE_RESTRICTED); break; default: /* Can't distinguish between Symmetric and Port * Restricted, so set the type to Unknown */ end_session(sess, PJ_SUCCESS, PJ_STUN_NAT_TYPE_ERR_UNKNOWN); break; } break; default: /* * Got other error with test 1B. */ end_session(sess, sess->result[ST_TEST_1B].status, PJ_STUN_NAT_TYPE_ERR_UNKNOWN); break; } break; default: /* * We've got other error with Test 2. */ end_session(sess, sess->result[ST_TEST_2].status, PJ_STUN_NAT_TYPE_ERR_UNKNOWN); break; } } break; default: /* * We've got other error with Test 1. 
*/ end_session(sess, sess->result[ST_TEST_1].status, PJ_STUN_NAT_TYPE_ERR_UNKNOWN); break; } on_return: pj_mutex_unlock(sess->mutex); }
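/* A hedged usage sketch (not from the original source) of the public entry
 * point that drives the detection state machine above. The callback body and
 * logging are illustrative; consult pjnath's nat_detect.h for the exact
 * interface.
 */
static void on_nat_detect(void *user_data,
                          const pj_stun_nat_detect_result *res)
{
    PJ_UNUSED_ARG(user_data);
    if (res->status != PJ_SUCCESS) {
        PJ_LOG(3,("natdetect", "NAT detection failed: %s", res->status_text));
    } else {
        PJ_LOG(3,("natdetect", "NAT type is %s", res->nat_type_name));
    }
}

static pj_status_t start_nat_detection(pj_stun_config *stun_cfg,
                                       const pj_sockaddr_in *stun_srv)
{
    return pj_stun_detect_nat_type(stun_srv, stun_cfg, NULL, &on_nat_detect);
}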
static void tsx_callback(void *token, pjsip_event *event) { pj_status_t status; pjsip_publishc *pubc = (pjsip_publishc*) token; pjsip_transaction *tsx = event->body.tsx_state.tsx; /* Decrement pending transaction counter. */ pj_assert(pubc->pending_tsx > 0); --pubc->pending_tsx; /* Mark that we're in callback to prevent deletion (#1164) */ ++pubc->in_callback; /* If publication data has been deleted by user then remove publication * data from transaction's callback, and don't call callback. */ if (pubc->_delete_flag) { /* Nothing to do */ ; } else if (tsx->status_code == PJSIP_SC_PROXY_AUTHENTICATION_REQUIRED || tsx->status_code == PJSIP_SC_UNAUTHORIZED) { pjsip_rx_data *rdata = event->body.tsx_state.src.rdata; pjsip_tx_data *tdata; status = pjsip_auth_clt_reinit_req( &pubc->auth_sess, rdata, tsx->last_tx, &tdata); if (status != PJ_SUCCESS) { call_callback(pubc, status, tsx->status_code, &rdata->msg_info.msg->line.status.reason, rdata, -1); } else { status = pjsip_publishc_send(pubc, tdata); } } else { pjsip_rx_data *rdata; pj_int32_t expiration = 0xFFFF; if (tsx->status_code/100 == 2) { pjsip_msg *msg; pjsip_expires_hdr *expires; pjsip_generic_string_hdr *etag_hdr; const pj_str_t STR_ETAG = { "SIP-ETag", 8 }; rdata = event->body.tsx_state.src.rdata; msg = rdata->msg_info.msg; /* Save ETag value */ etag_hdr = (pjsip_generic_string_hdr*) pjsip_msg_find_hdr_by_name(msg, &STR_ETAG, NULL); if (etag_hdr) { pj_strdup(pubc->pool, &pubc->etag, &etag_hdr->hvalue); } else { pubc->etag.slen = 0; } /* Update expires value */ expires = (pjsip_expires_hdr*) pjsip_msg_find_hdr(msg, PJSIP_H_EXPIRES, NULL); if (pubc->auto_refresh && expires) expiration = expires->ivalue; if (pubc->auto_refresh && expiration!=0 && expiration!=0xFFFF) { pj_time_val delay = { 0, 0}; /* Cancel existing timer, if any */ if (pubc->timer.id != 0) { pjsip_endpt_cancel_timer(pubc->endpt, &pubc->timer); pubc->timer.id = 0; } delay.sec = expiration - DELAY_BEFORE_REFRESH; if (pubc->expires != PJSIP_PUBC_EXPIRATION_NOT_SPECIFIED && delay.sec > (pj_int32_t)pubc->expires) { delay.sec = pubc->expires; } if (delay.sec < DELAY_BEFORE_REFRESH) delay.sec = DELAY_BEFORE_REFRESH; pubc->timer.cb = &pubc_refresh_timer_cb; pubc->timer.id = REFRESH_TIMER; pubc->timer.user_data = pubc; pjsip_endpt_schedule_timer( pubc->endpt, &pubc->timer, &delay); pj_gettimeofday(&pubc->last_refresh); pubc->next_refresh = pubc->last_refresh; pubc->next_refresh.sec += delay.sec; } } else { rdata = (event->body.tsx_state.type==PJSIP_EVENT_RX_MSG) ? event->body.tsx_state.src.rdata : NULL; } /* Call callback. */ if (expiration == 0xFFFF) expiration = -1; /* Temporarily increment pending_tsx to prevent callback from * destroying pubc. */ ++pubc->pending_tsx; call_callback(pubc, PJ_SUCCESS, tsx->status_code, (rdata ? 
&rdata->msg_info.msg->line.status.reason : pjsip_get_status_text(tsx->status_code)), rdata, expiration); --pubc->pending_tsx; /* If we have pending request(s), send them now */ pj_mutex_lock(pubc->mutex); while (!pj_list_empty(&pubc->pending_reqs)) { pjsip_tx_data *tdata = pubc->pending_reqs.next; pj_list_erase(tdata); /* Add SIP-If-Match if we have etag and the request doesn't have * one (http://trac.pjsip.org/repos/ticket/996) */ if (pubc->etag.slen) { const pj_str_t STR_HNAME = { "SIP-If-Match", 12 }; pjsip_generic_string_hdr *sim_hdr; sim_hdr = (pjsip_generic_string_hdr*) pjsip_msg_find_hdr_by_name(tdata->msg, &STR_HNAME, NULL); if (!sim_hdr) { /* Create the header */ sim_hdr = pjsip_generic_string_hdr_create(tdata->pool, &STR_HNAME, &pubc->etag); pjsip_msg_add_hdr(tdata->msg, (pjsip_hdr*)sim_hdr); } else { /* Update */ if (pj_strcmp(&pubc->etag, &sim_hdr->hvalue)) pj_strdup(tdata->pool, &sim_hdr->hvalue, &pubc->etag); } } status = pjsip_publishc_send(pubc, tdata); if (status == PJ_EPENDING) { pj_assert(!"Not expected"); pj_list_erase(tdata); pjsip_tx_data_dec_ref(tdata); } else if (status == PJ_SUCCESS) { break; } } pj_mutex_unlock(pubc->mutex); } /* No longer in callback. */ --pubc->in_callback; /* Delete the record if user destroy pubc during the callback. */ if (pubc->_delete_flag && pubc->pending_tsx==0) { pjsip_publishc_destroy(pubc); } }
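/* A small helper sketch (hypothetical, not part of the original file) that
 * restates the refresh scheduling rule used above: refresh
 * DELAY_BEFORE_REFRESH seconds before the expiration, never later than the
 * configured expires value, and never sooner than DELAY_BEFORE_REFRESH
 * seconds from now.
 */
static pj_int32_t calc_refresh_delay(pj_int32_t expiration,
                                     pj_int32_t configured_expires)
{
    pj_int32_t delay = expiration - DELAY_BEFORE_REFRESH;

    if (configured_expires != PJSIP_PUBC_EXPIRATION_NOT_SPECIFIED &&
        delay > configured_expires)
    {
        delay = configured_expires;
    }
    if (delay < DELAY_BEFORE_REFRESH)
        delay = DELAY_BEFORE_REFRESH;

    return delay;
}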
static void zrtp_synchEnter(ZrtpContext* ctx) { struct tp_zrtp *zrtp = (struct tp_zrtp*)ctx->userData; pj_mutex_lock(zrtp->zrtpMutex); }
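/* For symmetry, the matching leave callback presumably releases the same
 * mutex; a sketch based on that assumption:
 */
static void zrtp_synchLeave(ZrtpContext* ctx)
{
    struct tp_zrtp *zrtp = (struct tp_zrtp*)ctx->userData;
    pj_mutex_unlock(zrtp->zrtpMutex);
}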
static pj_status_t g729_alloc_codec( pjmedia_codec_factory *factory,
                                     const pjmedia_codec_info *id,
                                     pjmedia_codec **p_codec)
{
    pjmedia_codec *codec = NULL;
    struct g729_private *codec_priv;
    pj_status_t status;
    pj_pool_t *pool;

    PJ_ASSERT_RETURN(factory && id && p_codec, PJ_EINVAL);
    PJ_ASSERT_RETURN(factory==&g729_factory.base, PJ_EINVAL);

    /* Lock mutex. */
    pj_mutex_lock(g729_factory.mutex);

    /* Create pool for codec instance */
    pool = pjmedia_endpt_create_pool(g729_factory.endpt, "g729codec",
                                     512, 512);
    codec = PJ_POOL_ALLOC_T(pool, pjmedia_codec);
    codec_priv = PJ_POOL_ZALLOC_T(pool, struct g729_private);
    if (!codec || !codec_priv) {
        pj_pool_release(pool);
        pj_mutex_unlock(g729_factory.mutex);
        return PJ_ENOMEM;
    }
    codec_priv->pool = pool;

    /* Set the payload type */
    codec_priv->pt = id->pt;

#if !PLC_DISABLED
    /* Create PLC, always with 10ms ptime */
    status = pjmedia_plc_create(pool, 8000, 80, 0, &codec_priv->plc);
    if (status != PJ_SUCCESS) {
        pj_pool_release(pool);
        pj_mutex_unlock(g729_factory.mutex);
        return status;
    }
#endif

    /* Create VAD */
    status = pjmedia_silence_det_create(g729_factory.pool, 8000, 80,
                                        &codec_priv->vad);
    if (status != PJ_SUCCESS) {
        pj_pool_release(pool);
        pj_mutex_unlock(g729_factory.mutex);
        return status;
    }

    codec->factory = factory;
    codec->op = &g729_op;
    codec->codec_data = codec_priv;

    *p_codec = codec;

    /* Unlock mutex. */
    pj_mutex_unlock(g729_factory.mutex);

    return PJ_SUCCESS;
}
/*
 * Set default codec parameter.
 */
PJ_DEF(pj_status_t) pjmedia_codec_mgr_set_default_param(
                                        pjmedia_codec_mgr *mgr,
                                        const pjmedia_codec_info *info,
                                        const pjmedia_codec_param *param )
{
    unsigned i;
    pjmedia_codec_id codec_id;
    pj_pool_t *pool, *old_pool = NULL;
    struct pjmedia_codec_desc *codec_desc = NULL;
    pjmedia_codec_default_param *p;

    PJ_ASSERT_RETURN(mgr && info, PJ_EINVAL);

    if (!pjmedia_codec_info_to_id(info, (char*)&codec_id, sizeof(codec_id)))
        return PJ_EINVAL;

    pj_mutex_lock(mgr->mutex);

    /* Lookup codec desc */
    for (i=0; i < mgr->codec_cnt; ++i) {
        if (pj_ansi_stricmp(codec_id, mgr->codec_desc[i].id) == 0) {
            codec_desc = &mgr->codec_desc[i];
            break;
        }
    }

    /* Codec not found */
    if (!codec_desc) {
        pj_mutex_unlock(mgr->mutex);
        return PJMEDIA_CODEC_EUNSUP;
    }

    /* If codec param is previously set, reset the codec param but release
     * the codec param pool later after the new param is set (ticket #1171).
     */
    if (codec_desc->param) {
        pj_assert(codec_desc->param->pool);
        old_pool = codec_desc->param->pool;
        codec_desc->param = NULL;
    }

    /* When param is set to NULL, i.e: setting default codec param to library
     * default setting, just return PJ_SUCCESS.
     */
    if (NULL == param) {
        pj_mutex_unlock(mgr->mutex);
        if (old_pool)
            pj_pool_release(old_pool);
        return PJ_SUCCESS;
    }

    /* Instantiate and initialize codec param */
    pool = pj_pool_create(mgr->pf, (char*)codec_id, 256, 256, NULL);
    codec_desc->param = PJ_POOL_ZALLOC_T(pool, pjmedia_codec_default_param);
    p = codec_desc->param;
    p->pool = pool;
    p->param = PJ_POOL_ZALLOC_T(pool, pjmedia_codec_param);

    /* Update codec param */
    pj_memcpy(p->param, param, sizeof(pjmedia_codec_param));
    for (i = 0; i < param->setting.dec_fmtp.cnt; ++i) {
        pj_strdup(pool, &p->param->setting.dec_fmtp.param[i].name,
                  &param->setting.dec_fmtp.param[i].name);
        pj_strdup(pool, &p->param->setting.dec_fmtp.param[i].val,
                  &param->setting.dec_fmtp.param[i].val);
    }
    for (i = 0; i < param->setting.enc_fmtp.cnt; ++i) {
        pj_strdup(pool, &p->param->setting.enc_fmtp.param[i].name,
                  &param->setting.enc_fmtp.param[i].name);
        pj_strdup(pool, &p->param->setting.enc_fmtp.param[i].val,
                  &param->setting.enc_fmtp.param[i].val);
    }

    pj_mutex_unlock(mgr->mutex);

    if (old_pool)
        pj_pool_release(old_pool);

    return PJ_SUCCESS;
}
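/* A hedged usage sketch (not from the original source): fetch the current
 * default param, tweak one setting (VAD here, as an illustrative example),
 * and install it as the new default. Passing NULL instead would restore the
 * library default, as described in the function above.
 */
static pj_status_t disable_vad_by_default(pjmedia_codec_mgr *mgr,
                                          const pjmedia_codec_info *info)
{
    pjmedia_codec_param param;
    pj_status_t status;

    status = pjmedia_codec_mgr_get_default_param(mgr, info, &param);
    if (status != PJ_SUCCESS)
        return status;

    param.setting.vad = 0;
    return pjmedia_codec_mgr_set_default_param(mgr, info, &param);
}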
/* * pj_ioqueue_recv() * * Start asynchronous recv() from the socket. */ PJ_DEF(pj_status_t) pj_ioqueue_recv( pj_ioqueue_key_t *key, pj_ioqueue_op_key_t *op_key, void *buffer, pj_ssize_t *length, unsigned flags ) { struct read_operation *read_op; PJ_ASSERT_RETURN(key && op_key && buffer && length, PJ_EINVAL); PJ_CHECK_STACK(); /* Check if key is closing (need to do this first before accessing * other variables, since they might have been destroyed. See ticket * #469). */ if (IS_CLOSING(key)) return PJ_ECANCELLED; read_op = (struct read_operation*)op_key; read_op->op = PJ_IOQUEUE_OP_NONE; /* Try to see if there's data immediately available. */ if ((flags & PJ_IOQUEUE_ALWAYS_ASYNC) == 0) { pj_status_t status; pj_ssize_t size; size = *length; status = pj_sock_recv(key->fd, buffer, &size, flags); if (status == PJ_SUCCESS) { /* Yes! Data is available! */ *length = size; return PJ_SUCCESS; } else { /* If error is not EWOULDBLOCK (or EAGAIN on Linux), report * the error to caller. */ if (status != PJ_STATUS_FROM_OS(PJ_BLOCKING_ERROR_VAL)) return status; } } flags &= ~(PJ_IOQUEUE_ALWAYS_ASYNC); /* * No data is immediately available. * Must schedule asynchronous operation to the ioqueue. */ read_op->op = PJ_IOQUEUE_OP_RECV; read_op->buf = buffer; read_op->size = *length; read_op->flags = flags; pj_mutex_lock(key->mutex); /* Check again. Handle may have been closed after the previous check * in multithreaded app. If we add bad handle to the set it will * corrupt the ioqueue set. See #913 */ if (IS_CLOSING(key)) { pj_mutex_unlock(key->mutex); return PJ_ECANCELLED; } pj_list_insert_before(&key->read_list, read_op); ioqueue_add_to_set(key->ioqueue, key, READABLE_EVENT); pj_mutex_unlock(key->mutex); return PJ_EPENDING; }
PJ_DEF(pj_status_t) pj_ioqueue_lock_key(pj_ioqueue_key_t *key) { return pj_mutex_lock(key->mutex); }
PJ_DEF(void) pj_enter_critical_section(void) { #if PJ_HAS_THREADS pj_mutex_lock(&critical_section); #endif }
void ioqueue_dispatch_read_event( pj_ioqueue_t *ioqueue, pj_ioqueue_key_t *h ) { pj_status_t rc; /* Lock the key. */ pj_mutex_lock(h->mutex); if (IS_CLOSING(h)) { pj_mutex_unlock(h->mutex); return; } # if PJ_HAS_TCP if (!pj_list_empty(&h->accept_list)) { struct accept_operation *accept_op; pj_bool_t has_lock; /* Get one accept operation from the list. */ accept_op = h->accept_list.next; pj_list_erase(accept_op); accept_op->op = PJ_IOQUEUE_OP_NONE; /* Clear bit in fdset if there is no more pending accept */ if (pj_list_empty(&h->accept_list)) ioqueue_remove_from_set(ioqueue, h, READABLE_EVENT); rc=pj_sock_accept(h->fd, accept_op->accept_fd, accept_op->rmt_addr, accept_op->addrlen); if (rc==PJ_SUCCESS && accept_op->local_addr) { rc = pj_sock_getsockname(*accept_op->accept_fd, accept_op->local_addr, accept_op->addrlen); } /* Unlock; from this point we don't need to hold key's mutex * (unless concurrency is disabled, which in this case we should * hold the mutex while calling the callback) */ if (h->allow_concurrent) { /* concurrency may be changed while we're in the callback, so * save it to a flag. */ has_lock = PJ_FALSE; pj_mutex_unlock(h->mutex); } else { has_lock = PJ_TRUE; } /* Call callback. */ if (h->cb.on_accept_complete && !IS_CLOSING(h)) { (*h->cb.on_accept_complete)(h, (pj_ioqueue_op_key_t*)accept_op, *accept_op->accept_fd, rc); } if (has_lock) { pj_mutex_unlock(h->mutex); } } else # endif if (key_has_pending_read(h)) { struct read_operation *read_op; pj_ssize_t bytes_read; pj_bool_t has_lock; /* Get one pending read operation from the list. */ read_op = h->read_list.next; pj_list_erase(read_op); /* Clear fdset if there is no pending read. */ if (pj_list_empty(&h->read_list)) ioqueue_remove_from_set(ioqueue, h, READABLE_EVENT); bytes_read = read_op->size; if ((read_op->op == PJ_IOQUEUE_OP_RECV_FROM)) { read_op->op = PJ_IOQUEUE_OP_NONE; rc = pj_sock_recvfrom(h->fd, read_op->buf, &bytes_read, read_op->flags, read_op->rmt_addr, read_op->rmt_addrlen); } else if ((read_op->op == PJ_IOQUEUE_OP_RECV)) { read_op->op = PJ_IOQUEUE_OP_NONE; rc = pj_sock_recv(h->fd, read_op->buf, &bytes_read, read_op->flags); } else { pj_assert(read_op->op == PJ_IOQUEUE_OP_READ); read_op->op = PJ_IOQUEUE_OP_NONE; /* * User has specified pj_ioqueue_read(). * On Win32, we should do ReadFile(). But because we got * here because of select() anyway, user must have put a * socket descriptor on h->fd, which in this case we can * just call pj_sock_recv() instead of ReadFile(). * On Unix, user may put a file in h->fd, so we'll have * to call read() here. * This may not compile on systems which doesn't have * read(). That's why we only specify PJ_LINUX here so * that error is easier to catch. */ # if defined(PJ_WIN32) && PJ_WIN32 != 0 || \ defined(PJ_WIN32_WINCE) && PJ_WIN32_WINCE != 0 rc = pj_sock_recv(h->fd, read_op->buf, &bytes_read, read_op->flags); //rc = ReadFile((HANDLE)h->fd, read_op->buf, read_op->size, // &bytes_read, NULL); # elif (defined(PJ_HAS_UNISTD_H) && PJ_HAS_UNISTD_H != 0) bytes_read = read(h->fd, read_op->buf, bytes_read); rc = (bytes_read >= 0) ? PJ_SUCCESS : pj_get_os_error(); # elif defined(PJ_LINUX_KERNEL) && PJ_LINUX_KERNEL != 0 bytes_read = sys_read(h->fd, read_op->buf, bytes_read); rc = (bytes_read >= 0) ? PJ_SUCCESS : -bytes_read; # else # error "Implement read() for this platform!" # endif } if (rc != PJ_SUCCESS) { # if defined(PJ_WIN32) && PJ_WIN32 != 0 /* On Win32, for UDP, WSAECONNRESET on the receive side * indicates that previous sending has triggered ICMP Port * Unreachable message. 
* But we wouldn't know at this point which one of previous * key that has triggered the error, since UDP socket can * be shared! * So we'll just ignore it! */ if (rc == PJ_STATUS_FROM_OS(WSAECONNRESET)) { //PJ_LOG(4,(THIS_FILE, // "Ignored ICMP port unreach. on key=%p", h)); } # endif /* In any case we would report this to caller. */ bytes_read = -rc; } /* Unlock; from this point we don't need to hold key's mutex * (unless concurrency is disabled, which in this case we should * hold the mutex while calling the callback) */ if (h->allow_concurrent) { /* concurrency may be changed while we're in the callback, so * save it to a flag. */ has_lock = PJ_FALSE; pj_mutex_unlock(h->mutex); } else { has_lock = PJ_TRUE; } /* Call callback. */ if (h->cb.on_read_complete && !IS_CLOSING(h)) { (*h->cb.on_read_complete)(h, (pj_ioqueue_op_key_t*)read_op, bytes_read); } if (has_lock) { pj_mutex_unlock(h->mutex); } } else { /* * This is normal; execution may fall here when multiple threads * are signalled for the same event, but only one thread eventually * able to process the event. */ pj_mutex_unlock(h->mutex); } }
/* * pj_thread_create(...) */ PJ_DEF(pj_status_t) pj_thread_create( pj_pool_t *pool, const char *thread_name, pj_thread_proc *proc, void *arg, pj_size_t stack_size, unsigned flags, pj_thread_t **ptr_thread) { #if PJ_HAS_THREADS pj_thread_t *rec; pthread_attr_t thread_attr; void *stack_addr; int rc; PJ_UNUSED_ARG(stack_addr); PJ_CHECK_STACK(); PJ_ASSERT_RETURN(pool && proc && ptr_thread, PJ_EINVAL); /* Create thread record and assign name for the thread */ rec = (struct pj_thread_t*) pj_pool_zalloc(pool, sizeof(pj_thread_t)); PJ_ASSERT_RETURN(rec, PJ_ENOMEM); /* Set name. */ if (!thread_name) thread_name = "thr%p"; if (strchr(thread_name, '%')) { pj_ansi_snprintf(rec->obj_name, PJ_MAX_OBJ_NAME, thread_name, rec); } else { strncpy(rec->obj_name, thread_name, PJ_MAX_OBJ_NAME); rec->obj_name[PJ_MAX_OBJ_NAME-1] = '\0'; } /* Set default stack size */ if (stack_size == 0) stack_size = PJ_THREAD_DEFAULT_STACK_SIZE; #if defined(PJ_OS_HAS_CHECK_STACK) && PJ_OS_HAS_CHECK_STACK!=0 rec->stk_size = stack_size; rec->stk_max_usage = 0; #endif /* Emulate suspended thread with mutex. */ if (flags & PJ_THREAD_SUSPENDED) { rc = pj_mutex_create_simple(pool, NULL, &rec->suspended_mutex); if (rc != PJ_SUCCESS) { return rc; } pj_mutex_lock(rec->suspended_mutex); } else { pj_assert(rec->suspended_mutex == NULL); } /* Init thread attributes */ pthread_attr_init(&thread_attr); #if defined(PJ_THREAD_SET_STACK_SIZE) && PJ_THREAD_SET_STACK_SIZE!=0 /* Set thread's stack size */ rc = pthread_attr_setstacksize(&thread_attr, stack_size); if (rc != 0) return PJ_RETURN_OS_ERROR(rc); #endif /* PJ_THREAD_SET_STACK_SIZE */ #if defined(PJ_THREAD_ALLOCATE_STACK) && PJ_THREAD_ALLOCATE_STACK!=0 /* Allocate memory for the stack */ stack_addr = pj_pool_alloc(pool, stack_size); PJ_ASSERT_RETURN(stack_addr, PJ_ENOMEM); rc = pthread_attr_setstackaddr(&thread_attr, stack_addr); if (rc != 0) return PJ_RETURN_OS_ERROR(rc); #endif /* PJ_THREAD_ALLOCATE_STACK */ /* Create the thread. */ rec->proc = proc; rec->arg = arg; rc = pthread_create( &rec->thread, &thread_attr, &thread_main, rec); if (rc != 0) { return PJ_RETURN_OS_ERROR(rc); } *ptr_thread = rec; PJ_LOG(6, (rec->obj_name, "Thread created")); return PJ_SUCCESS; #else pj_assert(!"Threading is disabled!"); return PJ_EINVALIDOP; #endif }
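/* A minimal usage sketch (not from the original source) for the function
 * above: spawn a thread running thread_proc() and wait for it to finish.
 * The pool and thread_proc are assumptions of this example.
 */
static int thread_proc(void *arg)
{
    PJ_UNUSED_ARG(arg);
    PJ_LOG(4,("example", "worker running"));
    return 0;
}

static pj_status_t run_worker(pj_pool_t *pool)
{
    pj_thread_t *thread;
    pj_status_t status;

    status = pj_thread_create(pool, "worker%p", &thread_proc, NULL,
                              PJ_THREAD_DEFAULT_STACK_SIZE, 0, &thread);
    if (status != PJ_SUCCESS)
        return status;

    pj_thread_join(thread);
    pj_thread_destroy(thread);
    return PJ_SUCCESS;
}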
/*
 * pj_ioqueue_sendto()
 *
 * Start asynchronous write() to the descriptor.
 */
PJ_DEF(pj_status_t) pj_ioqueue_sendto( pj_ioqueue_key_t *key,
                                       pj_ioqueue_op_key_t *op_key,
                                       const void *data,
                                       pj_ssize_t *length,
                                       pj_uint32_t flags,
                                       const pj_sockaddr_t *addr,
                                       int addrlen)
{
    struct write_operation *write_op;
    unsigned retry;
    pj_status_t status;
    pj_ssize_t sent;

    PJ_ASSERT_RETURN(key && op_key && data && length, PJ_EINVAL);
    PJ_CHECK_STACK();

    /* Check if key is closing. */
    if (IS_CLOSING(key))
        return PJ_ECANCELLED;

    /* We cannot use PJ_IOQUEUE_ALWAYS_ASYNC for socket write */
    flags &= ~(PJ_IOQUEUE_ALWAYS_ASYNC);

    /* Fast track:
     *   Try to send data immediately, only if there's no pending write!
     * Note:
     *  We are speculating that the list is empty here without properly
     *  acquiring ioqueue's mutex first. This is intentional, to maximize
     *  performance via parallelism.
     *
     *  This should be safe, because:
     *      - by convention, we require caller to make sure that the
     *        key is not unregistered while other threads are invoking
     *        an operation on the same key.
     *      - pj_list_empty() is safe to be invoked by multiple threads,
     *        even when other threads are modifying the list.
     */
    if (pj_list_empty(&key->write_list)) {
        /*
         * See if data can be sent immediately.
         */
        sent = *length;
        status = pj_sock_sendto(key->fd, data, &sent, flags, addr, addrlen);
        if (status == PJ_SUCCESS) {
            /* Success! */
            *length = sent;
            return PJ_SUCCESS;
        } else {
            /* If error is not EWOULDBLOCK (or EAGAIN on Linux), report
             * the error to the caller.
             */
            if (status != PJ_STATUS_FROM_OS(PJ_BLOCKING_ERROR_VAL)) {
                return status;
            }
            /* Otherwise fall through and schedule the send asynchronously. */
        }
    }

    /*
     * Check that address storage can hold the address parameter.
     */
    PJ_ASSERT_RETURN(addrlen <= (int)sizeof(pj_sockaddr_in), PJ_EBUG);

    /*
     * Schedule asynchronous send.
     */
    write_op = (struct write_operation*)op_key;

    /* Spin if write_op has pending operation */
    for (retry=0; write_op->op != 0 && retry<PENDING_RETRY; ++retry)
        pj_thread_sleep(0);

    /* Last chance */
    if (write_op->op) {
        /* Unable to send the packet because there is already a pending
         * write on this write_op. We could not put the operation into the
         * write_op because write_op already contains a pending operation!
         * And we could not send the packet directly with sendto() either,
         * because that would break the ordering of the packets. So we can
         * only return an error here.
         *
         * This could happen, for example, in a multithreaded program where
         * polling is done by one thread while other threads only do the
         * sending. If the polling thread runs at a lower priority than the
         * sending threads, then it's possible that the pending write flag
         * is not cleared in time, because clearing is only done during
         * polling.
         *
         * The application should use multiple write operation keys in
         * situations like this.
         */
        //pj_assert(!"ioqueue: there is pending operation on this key!");
        return PJ_EBUSY;
    }

    write_op->op = PJ_IOQUEUE_OP_SEND_TO;
    write_op->buf = (char*)data;
    write_op->size = *length;
    write_op->written = 0;
    write_op->flags = flags;
    pj_memcpy(&write_op->rmt_addr, addr, addrlen);
    write_op->rmt_addrlen = addrlen;

    pj_mutex_lock(key->mutex);
    pj_list_insert_before(&key->write_list, write_op);
    ioqueue_add_to_set(key->ioqueue, key, WRITEABLE_EVENT);
    pj_mutex_unlock(key->mutex);

    return PJ_EPENDING;
}
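/* Illustrative sketch only (not part of this source): how a caller typically
 * handles the three outcomes of pj_ioqueue_sendto() above. send_packet(),
 * pkt and dst are placeholders; the op_key must not be reused until
 * on_write_complete() reports that the pending send has finished.
 */
#include <pjlib.h>

static pj_status_t send_packet(pj_ioqueue_key_t *key,
                               pj_ioqueue_op_key_t *send_op,
                               const void *pkt, pj_size_t pkt_len,
                               const pj_sockaddr_in *dst)
{
    pj_ssize_t size = (pj_ssize_t)pkt_len;
    pj_status_t status;

    /* Initialize the operation key before its first use. */
    pj_ioqueue_op_key_init(send_op, sizeof(*send_op));

    status = pj_ioqueue_sendto(key, send_op, pkt, &size, 0,
                               dst, (int)sizeof(*dst));
    if (status == PJ_SUCCESS) {
        /* Sent immediately via the fast track; 'size' holds bytes sent. */
        return PJ_SUCCESS;
    } else if (status == PJ_EPENDING) {
        /* Queued on write_list; completion is reported through
         * on_write_complete() once the socket becomes writable. */
        return PJ_SUCCESS;
    }

    /* Genuine error (including PJ_EBUSY when send_op is still in use). */
    return status;
}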
void SIPPresence::lock() { pj_mutex_lock(mutex_); mutex_owner_ = pj_thread_this(); ++mutex_nesting_level_; }
/* * Poll the I/O Completion Port, execute callback, * and return the key and bytes transfered of the last operation. */ static pj_bool_t poll_iocp( HANDLE hIocp, DWORD dwTimeout, pj_ssize_t *p_bytes, pj_ioqueue_key_t **p_key ) { DWORD dwBytesTransfered, dwKey; generic_overlapped *pOv; pj_ioqueue_key_t *key; pj_ssize_t size_status = -1; BOOL rcGetQueued; /* Poll for completion status. */ rcGetQueued = GetQueuedCompletionStatus(hIocp, &dwBytesTransfered, &dwKey, (OVERLAPPED**)&pOv, dwTimeout); /* The return value is: * - nonzero if event was dequeued. * - zero and pOv==NULL if no event was dequeued. * - zero and pOv!=NULL if event for failed I/O was dequeued. */ if (pOv) { pj_bool_t has_lock; /* Event was dequeued for either successfull or failed I/O */ key = (pj_ioqueue_key_t*)dwKey; size_status = dwBytesTransfered; /* Report to caller regardless */ if (p_bytes) *p_bytes = size_status; if (p_key) *p_key = key; #if PJ_IOQUEUE_HAS_SAFE_UNREG /* We shouldn't call callbacks if key is quitting. */ if (key->closing) return PJ_TRUE; /* If concurrency is disabled, lock the key * (and save the lock status to local var since app may change * concurrency setting while in the callback) */ if (key->allow_concurrent == PJ_FALSE) { pj_mutex_lock(key->mutex); has_lock = PJ_TRUE; } else { has_lock = PJ_FALSE; } /* Now that we get the lock, check again that key is not closing */ if (key->closing) { if (has_lock) { pj_mutex_unlock(key->mutex); } return PJ_TRUE; } /* Increment reference counter to prevent this key from being * deleted */ pj_atomic_inc(key->ref_count); #else PJ_UNUSED_ARG(has_lock); #endif /* Carry out the callback */ switch (pOv->operation) { case PJ_IOQUEUE_OP_READ: case PJ_IOQUEUE_OP_RECV: case PJ_IOQUEUE_OP_RECV_FROM: pOv->operation = 0; if (key->cb.on_read_complete) key->cb.on_read_complete(key, (pj_ioqueue_op_key_t*)pOv, size_status); break; case PJ_IOQUEUE_OP_WRITE: case PJ_IOQUEUE_OP_SEND: case PJ_IOQUEUE_OP_SEND_TO: pOv->operation = 0; if (key->cb.on_write_complete) key->cb.on_write_complete(key, (pj_ioqueue_op_key_t*)pOv, size_status); break; #if PJ_HAS_TCP case PJ_IOQUEUE_OP_ACCEPT: /* special case for accept. */ ioqueue_on_accept_complete(key, (ioqueue_accept_rec*)pOv); if (key->cb.on_accept_complete) { ioqueue_accept_rec *accept_rec = (ioqueue_accept_rec*)pOv; pj_status_t status = PJ_SUCCESS; pj_sock_t newsock; newsock = accept_rec->newsock; accept_rec->newsock = PJ_INVALID_SOCKET; if (newsock == PJ_INVALID_SOCKET) { int dwError = WSAGetLastError(); if (dwError == 0) dwError = OSERR_ENOTCONN; status = PJ_RETURN_OS_ERROR(dwError); } key->cb.on_accept_complete(key, (pj_ioqueue_op_key_t*)pOv, newsock, status); } break; case PJ_IOQUEUE_OP_CONNECT: #endif case PJ_IOQUEUE_OP_NONE: pj_assert(0); break; } #if PJ_IOQUEUE_HAS_SAFE_UNREG decrement_counter(key); if (has_lock) pj_mutex_unlock(key->mutex); #endif return PJ_TRUE; } /* No event was queued. */ return PJ_FALSE; }
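/* Illustrative sketch only (not part of this source): the poller above is
 * driven through pj_ioqueue_poll(). A typical application runs a loop like
 * the one below on one or more worker threads; worker_quit is a placeholder
 * flag owned by the caller.
 */
#include <pjlib.h>

static pj_bool_t worker_quit;

static int ioqueue_worker_thread(void *arg)
{
    pj_ioqueue_t *ioqueue = (pj_ioqueue_t*)arg;

    while (!worker_quit) {
        /* Wait up to 500 ms for completions; each dequeued completion
         * dispatches the matching on_*_complete() callback before
         * pj_ioqueue_poll() returns. */
        pj_time_val timeout = { 0, 500 };
        pj_ioqueue_poll(ioqueue, &timeout);
    }
    return 0;
}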
/* * pj_enter_critical_section() */ PJ_DEF(void) pj_enter_critical_section(void) { pj_mutex_lock(&critical_section_mutex); }
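/* Illustrative sketch only (not part of this source): the global critical
 * section is meant to guard very short operations on shared state, as in
 * the placeholder reference counter below.
 */
#include <pjlib.h>

static unsigned g_ref_count;

static unsigned add_ref(void)
{
    unsigned value;

    /* Keep the critical section as short as possible. */
    pj_enter_critical_section();
    value = ++g_ref_count;
    pj_leave_critical_section();

    return value;
}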
/*
 * Register a new dialog. Called by pjsip_dlg_create_uac() and
 * pjsip_dlg_create_uas_and_inc_lock().
 */
PJ_DEF(pj_status_t) pjsip_ua_register_dlg( pjsip_user_agent *ua,
                                           pjsip_dialog *dlg )
{
    /* Sanity check. */
    PJ_ASSERT_RETURN(ua && dlg, PJ_EINVAL);

    /* For all dialogs, the local tag (including its hash) must have been
     * initialized. */
    PJ_ASSERT_RETURN(dlg->local.info && dlg->local.info->tag.slen &&
                     dlg->local.tag_hval != 0, PJ_EBUG);

    /* For UAS dialogs, the remote tag (including its hash) must have been
     * initialized. */
    //PJ_ASSERT_RETURN(dlg->role==PJSIP_ROLE_UAC ||
    //                 (dlg->role==PJSIP_ROLE_UAS && dlg->remote.info->tag.slen
    //                  && dlg->remote.tag_hval != 0), PJ_EBUG);

    /* Lock the user agent. */
    pj_mutex_lock(mod_ua.mutex);

    /* For UAC, check if there is an existing dialog in the same set. */
    if (dlg->role == PJSIP_ROLE_UAC) {
        struct dlg_set *dlg_set;

        dlg_set = (struct dlg_set*)
                  pj_hash_get_lower( mod_ua.dlg_table,
                                     dlg->local.info->tag.ptr,
                                     (unsigned)dlg->local.info->tag.slen,
                                     &dlg->local.tag_hval);

        if (dlg_set) {
            /* This is NOT the first dialog in the dialog set.
             * Just add this dialog to the list.
             */
            pj_assert(dlg_set->dlg_list.next != (void*)&dlg_set->dlg_list);
            pj_list_push_back(&dlg_set->dlg_list, dlg);

            dlg->dlg_set = dlg_set;

        } else {
            /* This is the first dialog in the dialog set.
             * Create the dialog set and add this dialog to it.
             */
            dlg_set = alloc_dlgset_node();
            pj_list_init(&dlg_set->dlg_list);
            pj_list_push_back(&dlg_set->dlg_list, dlg);

            dlg->dlg_set = dlg_set;

            /* Register the dialog set in the hash table. */
            pj_hash_set_np_lower(mod_ua.dlg_table,
                                 dlg->local.info->tag.ptr,
                                 (unsigned)dlg->local.info->tag.slen,
                                 dlg->local.tag_hval, dlg_set->ht_entry,
                                 dlg_set);
        }

    } else {
        /* For UAS, create the dialog set with a single dialog as its
         * member. */
        struct dlg_set *dlg_set;

        dlg_set = alloc_dlgset_node();
        pj_list_init(&dlg_set->dlg_list);
        pj_list_push_back(&dlg_set->dlg_list, dlg);

        dlg->dlg_set = dlg_set;

        pj_hash_set_np_lower(mod_ua.dlg_table,
                             dlg->local.info->tag.ptr,
                             (unsigned)dlg->local.info->tag.slen,
                             dlg->local.tag_hval,
                             dlg_set->ht_entry, dlg_set);
    }

    /* Unlock user agent. */
    pj_mutex_unlock(mod_ua.mutex);

    /* Done. */
    return PJ_SUCCESS;
}
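/* Illustrative sketch only (not part of this source): the reverse operation,
 * resolving tags back to a dialog. It mirrors the hash lookup above
 * (mod_ua.dlg_table keyed by the local tag) and then walks the dialog set's
 * list comparing remote tags. find_dlg_by_tags() is a placeholder name, not
 * part of the UA layer's public API, and it relies on the same file-local
 * mod_ua and struct dlg_set definitions used above.
 */
static pjsip_dialog *find_dlg_by_tags(const pj_str_t *local_tag,
                                      const pj_str_t *remote_tag)
{
    struct dlg_set *dlg_set;
    pjsip_dialog *dlg = NULL;

    pj_mutex_lock(mod_ua.mutex);

    /* Find the dialog set registered under the local tag. */
    dlg_set = (struct dlg_set*)
              pj_hash_get_lower(mod_ua.dlg_table, local_tag->ptr,
                                (unsigned)local_tag->slen, NULL);
    if (dlg_set) {
        /* Walk the member dialogs and match the remote tag. */
        dlg = dlg_set->dlg_list.next;
        while (dlg != (pjsip_dialog*)&dlg_set->dlg_list) {
            if (pj_stricmp(&dlg->remote.info->tag, remote_tag) == 0)
                break;
            dlg = dlg->next;
        }
        if (dlg == (pjsip_dialog*)&dlg_set->dlg_list)
            dlg = NULL;
    }

    pj_mutex_unlock(mod_ua.mutex);
    return dlg;
}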
/* Test with recursive mutex. */ static int recursive_mutex_test(pj_pool_t *pool) { pj_status_t rc; pj_mutex_t *mutex; PJ_LOG(3,("", "...testing recursive mutex")); /* Create mutex. */ TRACE_(("", "....create mutex")); rc = pj_mutex_create( pool, "", PJ_MUTEX_RECURSE, &mutex); if (rc != PJ_SUCCESS) { app_perror("...error: pj_mutex_create", rc); return -10; } /* Normal lock/unlock cycle. */ TRACE_(("", "....lock mutex")); rc = pj_mutex_lock(mutex); if (rc != PJ_SUCCESS) { app_perror("...error: pj_mutex_lock", rc); return -20; } TRACE_(("", "....unlock mutex")); rc = pj_mutex_unlock(mutex); if (rc != PJ_SUCCESS) { app_perror("...error: pj_mutex_unlock", rc); return -30; } /* Lock again. */ TRACE_(("", "....lock mutex")); rc = pj_mutex_lock(mutex); if (rc != PJ_SUCCESS) return -40; /* Try-lock should NOT fail. . */ TRACE_(("", "....trylock mutex")); rc = pj_mutex_trylock(mutex); if (rc != PJ_SUCCESS) { app_perror("...error: recursive mutex is not recursive!", rc); return -40; } /* Locking again should not fail. */ TRACE_(("", "....lock mutex")); rc = pj_mutex_lock(mutex); if (rc != PJ_SUCCESS) { app_perror("...error: recursive mutex is not recursive!", rc); return -45; } /* Unlock several times and done. */ TRACE_(("", "....unlock mutex 3x")); rc = pj_mutex_unlock(mutex); if (rc != PJ_SUCCESS) return -50; rc = pj_mutex_unlock(mutex); if (rc != PJ_SUCCESS) return -51; rc = pj_mutex_unlock(mutex); if (rc != PJ_SUCCESS) return -52; TRACE_(("", "....destroy mutex")); rc = pj_mutex_destroy(mutex); if (rc != PJ_SUCCESS) return -60; TRACE_(("", "....done")); return PJ_SUCCESS; }
static int timer_initialize() { pj_status_t rc; pj_mutex_t* temp_mutex; rc = pj_mutex_create_simple(timer_pool, "zrtp_timer", &temp_mutex); if (rc != PJ_SUCCESS) { return rc; } pj_enter_critical_section(); if (timer_mutex == NULL) timer_mutex = temp_mutex; else pj_mutex_destroy(temp_mutex); pj_leave_critical_section(); pj_mutex_lock(timer_mutex); if (timer_initialized) { pj_mutex_unlock(timer_mutex); return PJ_SUCCESS; } rc = pj_timer_heap_create(timer_pool, 4, &timer); if (rc != PJ_SUCCESS) { goto ERROR; } rc = pj_sem_create(timer_pool, "zrtp_timer", 0, 1, &timer_sem); if (rc != PJ_SUCCESS) { goto ERROR; } rc = pj_thread_create(timer_pool, "zrtp_timer", &timer_thread_run, NULL, PJ_THREAD_DEFAULT_STACK_SIZE, 0, &thread_run); if (rc != PJ_SUCCESS) { goto ERROR; } timer_initialized = 1; pj_mutex_unlock(timer_mutex); return PJ_SUCCESS; ERROR: if (timer != NULL) { pj_timer_heap_destroy(timer); timer = NULL; } if (timer_sem != NULL) { pj_sem_destroy(timer_sem); timer_sem = NULL; } if (timer_mutex != NULL) { pj_mutex_unlock(timer_mutex); pj_mutex_destroy(timer_mutex); timer_mutex = NULL; } return rc; }
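/* Illustrative sketch only (not part of this source): scheduling an entry on
 * the heap created by timer_initialize() above. It assumes the worker thread
 * started there sleeps on timer_sem and polls the heap when woken; on_timer()
 * and schedule_timer() are placeholder names for this example.
 */
#include <pjlib.h>

static void on_timer(pj_timer_heap_t *ht, pj_timer_entry *entry)
{
    PJ_UNUSED_ARG(ht);
    PJ_UNUSED_ARG(entry);
    /* Timer expired; entry->user_data carries the caller's context. */
}

static pj_status_t schedule_timer(pj_timer_entry *entry, void *user_data,
                                  unsigned delay_msec)
{
    pj_time_val delay;
    pj_status_t rc;

    delay.sec  = delay_msec / 1000;
    delay.msec = delay_msec % 1000;

    pj_timer_entry_init(entry, 0, user_data, &on_timer);

    /* Serialize access to the shared heap with the same mutex used by
     * timer_initialize(). */
    pj_mutex_lock(timer_mutex);
    rc = pj_timer_heap_schedule(timer, entry, &delay);
    pj_mutex_unlock(timer_mutex);

    /* Wake the timer thread so it can recompute its next timeout. */
    if (rc == PJ_SUCCESS)
        pj_sem_post(timer_sem);

    return rc;
}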
// // Lock mutex. // pj_status_t acquire() { return pj_mutex_lock(mutex_); }