/* Transaction completion callback for the client PUBLISH session.
 *
 * Runs once per completed PUBLISH transaction. Handles three cases:
 * user already destroyed the session (do nothing), authentication
 * challenge (re-send with credentials), or final response (save ETag,
 * schedule refresh timer, notify user, and flush queued requests).
 *
 * Locking/lifetime notes (grounded in the code below): pending_tsx and
 * in_callback guard pubc against destruction while this callback runs;
 * the pending-request queue is protected by pubc->mutex.
 */
static void tsx_callback(void *token, pjsip_event *event)
{
    pj_status_t status;
    pjsip_publishc *pubc = (pjsip_publishc*) token;
    pjsip_transaction *tsx = event->body.tsx_state.tsx;

    /* Decrement pending transaction counter. */
    pj_assert(pubc->pending_tsx > 0);
    --pubc->pending_tsx;

    /* Mark that we're in callback to prevent deletion (#1164) */
    ++pubc->in_callback;

    /* If publication data has been deleted by user then remove publication
     * data from transaction's callback, and don't call callback.
     */
    if (pubc->_delete_flag) {

        /* Nothing to do */
        ;

    } else if (tsx->status_code == PJSIP_SC_PROXY_AUTHENTICATION_REQUIRED ||
               tsx->status_code == PJSIP_SC_UNAUTHORIZED)
    {
        /* 401/407: re-initialize the request with credentials and retry. */
        pjsip_rx_data *rdata = event->body.tsx_state.src.rdata;
        pjsip_tx_data *tdata;

        status = pjsip_auth_clt_reinit_req( &pubc->auth_sess,
                                            rdata, tsx->last_tx, &tdata);
        if (status != PJ_SUCCESS) {
            /* Cannot build authenticated retry; report failure to user. */
            call_callback(pubc, status, tsx->status_code,
                          &rdata->msg_info.msg->line.status.reason,
                          rdata, -1);
        } else {
            status = pjsip_publishc_send(pubc, tdata);
        }

    } else {
        pjsip_rx_data *rdata;
        /* 0xFFFF is used as "expiration not known" sentinel; converted to
         * -1 before being passed to the user callback below. */
        pj_int32_t expiration = 0xFFFF;

        if (tsx->status_code/100 == 2) {
            pjsip_msg *msg;
            pjsip_expires_hdr *expires;
            pjsip_generic_string_hdr *etag_hdr;
            const pj_str_t STR_ETAG = { "SIP-ETag", 8 };

            rdata = event->body.tsx_state.src.rdata;
            msg = rdata->msg_info.msg;

            /* Save ETag value (needed for subsequent SIP-If-Match). */
            etag_hdr = (pjsip_generic_string_hdr*)
                       pjsip_msg_find_hdr_by_name(msg, &STR_ETAG, NULL);
            if (etag_hdr) {
                pj_strdup(pubc->pool, &pubc->etag, &etag_hdr->hvalue);
            } else {
                pubc->etag.slen = 0;
            }

            /* Update expires value */
            expires = (pjsip_expires_hdr*)
                      pjsip_msg_find_hdr(msg, PJSIP_H_EXPIRES, NULL);

            if (pubc->auto_refresh && expires)
                expiration = expires->ivalue;

            if (pubc->auto_refresh && expiration!=0 && expiration!=0xFFFF)
            {
                pj_time_val delay = { 0, 0};

                /* Cancel existing timer, if any */
                if (pubc->timer.id != 0) {
                    pjsip_endpt_cancel_timer(pubc->endpt, &pubc->timer);
                    pubc->timer.id = 0;
                }

                /* Refresh a bit before the publication actually expires,
                 * but never sooner than DELAY_BEFORE_REFRESH. */
                delay.sec = expiration - DELAY_BEFORE_REFRESH;
                if (pubc->expires != PJSIP_PUBC_EXPIRATION_NOT_SPECIFIED &&
                    delay.sec > (pj_int32_t)pubc->expires)
                {
                    delay.sec = pubc->expires;
                }
                if (delay.sec < DELAY_BEFORE_REFRESH)
                    delay.sec = DELAY_BEFORE_REFRESH;
                pubc->timer.cb = &pubc_refresh_timer_cb;
                pubc->timer.id = REFRESH_TIMER;
                pubc->timer.user_data = pubc;
                pjsip_endpt_schedule_timer( pubc->endpt, &pubc->timer, &delay);
                pj_gettimeofday(&pubc->last_refresh);
                pubc->next_refresh = pubc->last_refresh;
                pubc->next_refresh.sec += delay.sec;
            }

        } else {
            /* Non-2xx final response; rdata may be NULL on timeout or
             * transport error (no received message). */
            rdata = (event->body.tsx_state.type==PJSIP_EVENT_RX_MSG) ?
                     event->body.tsx_state.src.rdata : NULL;
        }

        /* Call callback. */
        if (expiration == 0xFFFF) expiration = -1;

        /* Temporarily increment pending_tsx to prevent callback from
         * destroying pubc.
         */
        ++pubc->pending_tsx;

        call_callback(pubc, PJ_SUCCESS, tsx->status_code,
                      (rdata ? &rdata->msg_info.msg->line.status.reason
                             : pjsip_get_status_text(tsx->status_code)),
                      rdata, expiration);

        --pubc->pending_tsx;

        /* If we have pending request(s), send them now */
        pj_mutex_lock(pubc->mutex);
        while (!pj_list_empty(&pubc->pending_reqs)) {
            pending_publish *pp = pubc->pending_reqs.next;
            pjsip_tx_data *tdata = pp->tdata;

            /* Remove the request from pending request list,
             * and keep the unused entry into pending_reqs_empty pool.
             */
            pj_list_erase(pp);
            pj_list_push_back(&pubc->pending_reqs_empty, pp);

            /* Add SIP-If-Match if we have etag and the request doesn't have
             * one (http://trac.pjsip.org/repos/ticket/996)
             */
            if (pubc->etag.slen) {
                const pj_str_t STR_HNAME = { "SIP-If-Match", 12 };
                pjsip_generic_string_hdr *sim_hdr;

                sim_hdr = (pjsip_generic_string_hdr*)
                          pjsip_msg_find_hdr_by_name(tdata->msg, &STR_HNAME,
                                                     NULL);
                if (!sim_hdr) {
                    /* Create the header */
                    sim_hdr = pjsip_generic_string_hdr_create(tdata->pool,
                                                              &STR_HNAME,
                                                              &pubc->etag);
                    pjsip_msg_add_hdr(tdata->msg, (pjsip_hdr*)sim_hdr);
                } else {
                    /* Update */
                    if (pj_strcmp(&pubc->etag, &sim_hdr->hvalue))
                        pj_strdup(tdata->pool, &sim_hdr->hvalue, &pubc->etag);
                }
            }

            /* Send one queued request; on success stop — its own
             * tsx_callback invocation will drain the rest. */
            status = pjsip_publishc_send(pubc, tdata);
            if (status == PJ_EPENDING) {
                pj_assert(!"Not expected");
                pjsip_tx_data_dec_ref(tdata);
            } else if (status == PJ_SUCCESS) {
                break;
            }
        }
        pj_mutex_unlock(pubc->mutex);
    }

    /* No longer in callback. */
    --pubc->in_callback;

    /* Delete the record if user destroy pubc during the callback. */
    if (pubc->_delete_flag && pubc->pending_tsx==0) {
        pjsip_publishc_destroy(pubc);
    }
}
/*
 * Perform echo cancellation.
 *
 * Echo *suppressor*: instead of subtracting an echo estimate, it scales
 * the whole microphone frame (rec_frm) down by a factor chosen from the
 * current talk state (local talk / doubletalk / remote talk / silence).
 * rec_frm is modified in place; play_frm is only used for level tracking.
 * Always returns PJ_SUCCESS.
 */
PJ_DEF(pj_status_t) echo_supp_cancel_echo( void *state,
                                           pj_int16_t *rec_frm,
                                           const pj_int16_t *play_frm,
                                           unsigned options,
                                           void *reserved )
{
    unsigned i, N;
    echo_supp *ec = (echo_supp*) state;

    PJ_UNUSED_ARG(options);
    PJ_UNUSED_ARG(reserved);

    /* Calculate number of segments. This should be okay even if
     * samples_per_frame is not a multiply of samples_per_segment, since
     * we only calculate level.
     */
    N = ec->samples_per_frame / ec->samples_per_segment;
    pj_assert(N>0);
    for (i=0; i<N; ++i) {
        unsigned pos = i * ec->samples_per_segment;
        echo_supp_update(ec, rec_frm+pos, play_frm+pos);
    }

    if (ec->tail_index < 0) {
        /* Not ready: tail length has not been learned yet, pass frame
         * through unmodified. */
    } else {
        unsigned lookup_cnt, rec_level=0, play_level=0;
        unsigned tail_cnt;
        float factor;

        /* How many previous segments to lookup */
        lookup_cnt = SIGNAL_LOOKUP_MSEC / SEGMENT_PTIME;
        if (lookup_cnt > ec->templ_cnt)
            lookup_cnt = ec->templ_cnt;

        /* Lookup in recording history to get maximum mic level, to see
         * if local user is currently talking
         */
        for (i=ec->templ_cnt - lookup_cnt; i < ec->templ_cnt; ++i) {
            if (ec->rec_hist[i] > rec_level)
                rec_level = ec->rec_hist[i];
        }
        /* XOR 0xFF inverts the u-law byte so that larger value = louder. */
        rec_level = pjmedia_linear2ulaw(rec_level) ^ 0xFF;

        /* Calculate the detected tail length, in # of segments */
        tail_cnt = (ec->tail_cnt - ec->tail_index);

        /* Lookup in playback history to get max speaker level, to see
         * if remote user is currently talking.
         * NOTE(review): the start index ec->play_hist_cnt - lookup_cnt -
         * tail_cnt is unsigned; presumably play_hist_cnt is always large
         * enough that this cannot wrap -- confirm against echo_supp_update.
         */
        for (i=ec->play_hist_cnt -lookup_cnt -tail_cnt;
             i<ec->play_hist_cnt-tail_cnt; ++i)
        {
            if (ec->play_hist[i] > play_level)
                play_level = ec->play_hist[i];
        }
        play_level = pjmedia_linear2ulaw(play_level) ^ 0xFF;

        if (rec_level >= MIN_SIGNAL_ULAW) {
            if (play_level < MIN_SIGNAL_ULAW) {
                /* Mic is talking, speaker is idle. Let mic signal pass as is. */
                factor = 1.0;
                echo_supp_set_state(ec, ST_LOCAL_TALK, rec_level);
            } else if (rec_level > play_level) {
                /* Seems that both are talking. Scale the mic signal
                 * down a little bit to reduce echo, while allowing both
                 * parties to talk at the same time.
                 */
                factor = (float)(ec->avg_factor[ec->tail_index] * 2);
                echo_supp_set_state(ec, ST_DOUBLETALK, rec_level);
            } else {
                /* Speaker is active, but we've picked up large signal in
                 * the microphone. Assume that this is an echo, so bring
                 * the level down to minimum too.
                 */
                factor = ec->min_factor[ec->tail_index] / 2;
                echo_supp_set_state(ec, ST_REM_TALK, play_level);
            }
        } else {
            if (play_level < MIN_SIGNAL_ULAW) {
                /* Both mic and speaker seems to be idle. Also scale the
                 * mic signal down with average factor to reduce low power
                 * echo.
                 */
                factor = ec->avg_factor[ec->tail_index] * 3 / 2;
                echo_supp_set_state(ec, ST_REM_SILENT, rec_level);
            } else {
                /* Mic is idle, but there's something playing in speaker.
                 * Scale the mic down to minimum
                 */
                factor = ec->min_factor[ec->tail_index] / 2;
                echo_supp_set_state(ec, ST_REM_TALK, play_level);
            }
        }

        /* Smoothen the transition: rise quickly, decay slowly (19/20
         * weight on the previous factor). */
        if (factor >= ec->last_factor)
            factor = (factor + ec->last_factor) / 2;
        else
            factor = (factor + ec->last_factor*19) / 20;

        /* Amplify frame */
        amplify_frame(rec_frm, ec->samples_per_frame,
                      pj_ufloat_from_float(factor));
        ec->last_factor = factor;

        if (ec->talk_state == ST_REM_TALK) {
            unsigned level, recalc_cnt;

            /* Get the adjusted frame signal level */
            level = pjmedia_calc_avg_signal(rec_frm, ec->samples_per_frame);
            level = pjmedia_linear2ulaw(level) ^ 0xFF;

            /* Accumulate average echo residue to see the ES effectiveness */
            ec->residue = ((ec->residue * ec->running_cnt) + level) /
                          (ec->running_cnt + 1);

            ++ec->running_cnt;

            /* Check if we need to re-learn */
            recalc_cnt = CHECK_PERIOD * ec->clock_rate / ec->samples_per_frame;
            if (ec->running_cnt > recalc_cnt) {
                int iresidue;

                iresidue = (int)(ec->residue*1000);

                PJ_LOG(5,(THIS_FILE, "Echo suppressor residue = %d.%03d",
                          iresidue/1000, iresidue%1000));

                /* Too much echo leaked through: restart learning. */
                if (ec->residue > MAX_RESIDUE && !ec->learning) {
                    echo_supp_soft_reset(ec);
                    ec->residue = 0;
                } else {
                    ec->running_cnt = 0;
                    ec->residue = 0;
                }
            }
        }
    }

    return PJ_SUCCESS;
}
/* Return the value of the tuple's "id" XML attribute.
 * The attribute is expected to be present on every PIDF tuple, hence
 * the assertion; the returned string points into the tuple's own
 * XML node storage.
 */
PJ_DEF(const pj_str_t*) pjpidf_tuple_get_id(const pjpidf_tuple *t)
{
    const pj_xml_attr *id_attr;

    id_attr = pj_xml_find_attr((pj_xml_node*)t, &ID, NULL);
    pj_assert(id_attr != NULL);

    return &id_attr->value;
}
/*
 * pj_ioqueue_register_sock()
 *
 * Register a socket to ioqueue.
 *
 * Sets the socket non-blocking, allocates (or reuses) a key, adds the fd
 * to the epoll set with EPOLLIN|EPOLLERR, and links the key into the
 * active list. On success *p_key receives the new key; on failure it is
 * set to NULL and an error status is returned. The whole operation is
 * serialized under ioqueue->lock.
 */
PJ_DEF(pj_status_t) pj_ioqueue_register_sock2(pj_pool_t *pool,
                                              pj_ioqueue_t *ioqueue,
                                              pj_sock_t sock,
                                              pj_grp_lock_t *grp_lock,
                                              void *user_data,
                                              const pj_ioqueue_callback *cb,
                                              pj_ioqueue_key_t **p_key)
{
    pj_ioqueue_key_t *key = NULL;
    pj_uint32_t value;
    struct epoll_event ev;
    int status;
    pj_status_t rc = PJ_SUCCESS;

    PJ_ASSERT_RETURN(pool && ioqueue && sock != PJ_INVALID_SOCKET &&
                     cb && p_key, PJ_EINVAL);

    pj_lock_acquire(ioqueue->lock);

    if (ioqueue->count >= ioqueue->max) {
        rc = PJ_ETOOMANY;
        TRACE_((THIS_FILE, "pj_ioqueue_register_sock error: too many files"));
        goto on_return;
    }

    /* Set socket to nonblocking. */
    value = 1;
    if ((rc=os_ioctl(sock, FIONBIO, (ioctl_val_type)&value))) {
        TRACE_((THIS_FILE, "pj_ioqueue_register_sock error: ioctl rc=%d",
                rc));
        rc = pj_get_netos_error();
        goto on_return;
    }

    /* If safe unregistration (PJ_IOQUEUE_HAS_SAFE_UNREG) is used, get
     * the key from the free list. Otherwise allocate a new one.
     */
#if PJ_IOQUEUE_HAS_SAFE_UNREG

    /* Scan closing_keys first to let them come back to free_list */
    scan_closing_keys(ioqueue);

    pj_assert(!pj_list_empty(&ioqueue->free_list));
    if (pj_list_empty(&ioqueue->free_list)) {
        rc = PJ_ETOOMANY;
        goto on_return;
    }

    key = ioqueue->free_list.next;
    pj_list_erase(key);
#else
    /* Create key. */
    key = (pj_ioqueue_key_t*)pj_pool_zalloc(pool, sizeof(pj_ioqueue_key_t));
#endif

    rc = ioqueue_init_key(pool, ioqueue, key, sock, grp_lock, user_data, cb);
    if (rc != PJ_SUCCESS) {
        key = NULL;
        goto on_return;
    }

    /* Create key's mutex */
 /*   rc = pj_mutex_create_recursive(pool, NULL, &key->mutex);
    if (rc != PJ_SUCCESS) {
        key = NULL;
        goto on_return;
    }
*/
    /* os_epoll_ctl. */
    ev.events = EPOLLIN | EPOLLERR;
    ev.epoll_data = (epoll_data_type)key;
    status = os_epoll_ctl(ioqueue->epfd, EPOLL_CTL_ADD, sock, &ev);
    if (status < 0) {
        /* NOTE(review): on this failure path the key taken from free_list
         * (safe-unreg build) is neither pushed back to free_list nor to
         * closing_list, so the key slot appears to be lost -- verify
         * against upstream ioqueue_epoll.c. */
        rc = pj_get_os_error();
        pj_lock_destroy(key->lock);
        key = NULL;
        TRACE_((THIS_FILE,
                "pj_ioqueue_register_sock error: os_epoll_ctl rc=%d",
                status));
        goto on_return;
    }

    /* Register */
    pj_list_insert_before(&ioqueue->active_list, key);
    ++ioqueue->count;

    //TRACE_((THIS_FILE, "socket registered, count=%d", ioqueue->count));

on_return:
    if (rc != PJ_SUCCESS) {
        /* Drop the group-lock reference taken by ioqueue_init_key. */
        if (key && key->grp_lock)
            pj_grp_lock_dec_ref_dbg(key->grp_lock, "ioqueue", 0);
    }
    *p_key = key;
    pj_lock_release(ioqueue->lock);

    return rc;
}
/* * pj_ioqueue_poll() * */ PJ_DEF(int) pj_ioqueue_poll( pj_ioqueue_t *ioqueue, const pj_time_val *timeout) { int i, count, event_cnt, processed_cnt; int msec; //struct epoll_event *events = ioqueue->events; //struct queue *queue = ioqueue->queue; enum { MAX_EVENTS = PJ_IOQUEUE_MAX_CAND_EVENTS }; struct epoll_event events[MAX_EVENTS]; struct queue queue[MAX_EVENTS]; pj_timestamp t1, t2; PJ_CHECK_STACK(); msec = timeout ? PJ_TIME_VAL_MSEC(*timeout) : 9000; TRACE_((THIS_FILE, "start os_epoll_wait, msec=%d", msec)); pj_get_timestamp(&t1); //count = os_epoll_wait( ioqueue->epfd, events, ioqueue->max, msec); count = os_epoll_wait( ioqueue->epfd, events, MAX_EVENTS, msec); if (count == 0) { #if PJ_IOQUEUE_HAS_SAFE_UNREG /* Check the closing keys only when there's no activity and when there are * pending closing keys. */ if (count == 0 && !pj_list_empty(&ioqueue->closing_list)) { pj_lock_acquire(ioqueue->lock); scan_closing_keys(ioqueue); pj_lock_release(ioqueue->lock); } #endif TRACE_((THIS_FILE, "os_epoll_wait timed out")); return count; } else if (count < 0) { TRACE_((THIS_FILE, "os_epoll_wait error")); return -pj_get_netos_error(); } pj_get_timestamp(&t2); TRACE_((THIS_FILE, "os_epoll_wait returns %d, time=%d usec", count, pj_elapsed_usec(&t1, &t2))); /* Lock ioqueue. */ pj_lock_acquire(ioqueue->lock); for (event_cnt=0, i=0; i<count; ++i) { pj_ioqueue_key_t *h = (pj_ioqueue_key_t*)(epoll_data_type) events[i].epoll_data; TRACE_((THIS_FILE, "event %d: events=%d", i, events[i].events)); /* * Check readability. */ if ((events[i].events & EPOLLIN) && (key_has_pending_read(h) || key_has_pending_accept(h)) && !IS_CLOSING(h) ) { #if PJ_IOQUEUE_HAS_SAFE_UNREG increment_counter(h); #endif queue[event_cnt].key = h; queue[event_cnt].event_type = READABLE_EVENT; ++event_cnt; continue; } /* * Check for writeability. 
*/ if ((events[i].events & EPOLLOUT) && key_has_pending_write(h) && !IS_CLOSING(h)) { #if PJ_IOQUEUE_HAS_SAFE_UNREG increment_counter(h); #endif queue[event_cnt].key = h; queue[event_cnt].event_type = WRITEABLE_EVENT; ++event_cnt; continue; } #if PJ_HAS_TCP /* * Check for completion of connect() operation. */ if ((events[i].events & EPOLLOUT) && (h->connecting) && !IS_CLOSING(h)) { #if PJ_IOQUEUE_HAS_SAFE_UNREG increment_counter(h); #endif queue[event_cnt].key = h; queue[event_cnt].event_type = WRITEABLE_EVENT; ++event_cnt; continue; } #endif /* PJ_HAS_TCP */ /* * Check for error condition. */ if ((events[i].events & EPOLLERR) && !IS_CLOSING(h)) { /* * We need to handle this exception event. If it's related to us * connecting, report it as such. If not, just report it as a * read event and the higher layers will handle it. */ if (h->connecting) { #if PJ_IOQUEUE_HAS_SAFE_UNREG increment_counter(h); #endif queue[event_cnt].key = h; queue[event_cnt].event_type = EXCEPTION_EVENT; ++event_cnt; } else if (key_has_pending_read(h) || key_has_pending_accept(h)) { #if PJ_IOQUEUE_HAS_SAFE_UNREG increment_counter(h); #endif queue[event_cnt].key = h; queue[event_cnt].event_type = READABLE_EVENT; ++event_cnt; } continue; } } for (i=0; i<event_cnt; ++i) { if (queue[i].key->grp_lock) pj_grp_lock_add_ref_dbg(queue[i].key->grp_lock, "ioqueue", 0); } PJ_RACE_ME(5); pj_lock_release(ioqueue->lock); PJ_RACE_ME(5); processed_cnt = 0; /* Now process the events. 
*/ for (i=0; i<event_cnt; ++i) { /* Just do not exceed PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL */ if (processed_cnt < PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL) { switch (queue[i].event_type) { case READABLE_EVENT: if (ioqueue_dispatch_read_event(ioqueue, queue[i].key)) ++processed_cnt; break; case WRITEABLE_EVENT: if (ioqueue_dispatch_write_event(ioqueue, queue[i].key)) ++processed_cnt; break; case EXCEPTION_EVENT: if (ioqueue_dispatch_exception_event(ioqueue, queue[i].key)) ++processed_cnt; break; case NO_EVENT: pj_assert(!"Invalid event!"); break; } } #if PJ_IOQUEUE_HAS_SAFE_UNREG decrement_counter(queue[i].key); #endif if (queue[i].key->grp_lock) pj_grp_lock_dec_ref_dbg(queue[i].key->grp_lock, "ioqueue", 0); } /* Special case: * When epoll returns > 0 but no descriptors are actually set! */ if (count > 0 && !event_cnt && msec > 0) { pj_thread_sleep(msec); } TRACE_((THIS_FILE, " poll: count=%d events=%d processed=%d", count, event_cnt, processed_cnt)); pj_get_timestamp(&t1); TRACE_((THIS_FILE, "ioqueue_poll() returns %d, time=%d usec", processed, pj_elapsed_usec(&t2, &t1))); return processed_cnt; }
/* This is our RTP callback, that is called by the slave transport when it
 * receives RTP packet.
 *
 * Incoming packets are demultiplexed by the first byte: a value whose top
 * nibble is not 0x1 is treated as RTP/SRTP (unprotected if a secure
 * context exists, then forwarded to the stream); anything else is checked
 * for the ZRTP magic cookie and fed to the ZRTP engine.
 */
static void transport_rtp_cb(void *user_data, void *pkt, pj_ssize_t size)
{
    struct tp_zrtp *zrtp = (struct tp_zrtp*)user_data;
    pj_uint8_t* buffer = (pj_uint8_t*)pkt;
    int32_t newLen = 0;
    pj_status_t rc = PJ_SUCCESS;

    /* Fix: assert on stream_rtp_cb, the callback actually invoked below;
     * the original asserted stream_rtcp_cb, which is never used here. */
    pj_assert(zrtp && zrtp->stream_rtp_cb && pkt);

    // check if this could be a real RTP/SRTP packet.
    if ((*buffer & 0xf0) != 0x10)
    {
        // Could be real RTP, check if we are in secure mode
        if (zrtp->srtpReceive == NULL || size < 0) {
            zrtp->stream_rtp_cb(zrtp->stream_user_data, pkt, size);
        } else {
            rc = zsrtp_unprotect(zrtp->srtpReceive, pkt, size, &newLen);
            if (rc == 1) {
                zrtp->unprotect++;
                zrtp->stream_rtp_cb(zrtp->stream_user_data, pkt,
                                    newLen);
                zrtp->unprotect_err = 0;
            } else {
                /* Fix: guard against NULL userCallback before reporting,
                 * consistent with the CRC-mismatch path below. */
                if (zrtp->userCallback != NULL) {
                    if (rc == -1) {
                        zrtp->userCallback->zrtp_showMessage(
                                zrtp->userCallback->userData,
                                zrtp_Warning,
                                zrtp_WarningSRTPauthError);
                    } else {
                        zrtp->userCallback->zrtp_showMessage(
                                zrtp->userCallback->userData,
                                zrtp_Warning,
                                zrtp_WarningSRTPreplayError);
                    }
                }
                zrtp->unprotect_err = rc;
            }
        }
        if (!zrtp->started && zrtp->enableZrtp)
            pjmedia_transport_zrtp_startZrtp((pjmedia_transport *)zrtp);

        return;
    }

    // We assume all other packets are ZRTP packets here. Process
    // if ZRTP processing is enabled. Because valid RTP packets are
    // already handled we delete any packets here after processing.
    if (zrtp->enableZrtp && zrtp->zrtpCtx != NULL)
    {
        // Get CRC value into crc (see above how to compute the offset)
        pj_uint16_t temp = size - CRC_SIZE;
        pj_uint32_t crc = *(uint32_t*)(buffer + temp);
        crc = pj_ntohl(crc);

        if (!zrtp_CheckCksum(buffer, temp, crc))
        {
            if (zrtp->userCallback != NULL)
                zrtp->userCallback->zrtp_showMessage(
                        zrtp->userCallback->userData,
                        zrtp_Warning,
                        zrtp_WarningCRCmismatch);
            return;
        }

        /* ZRTP magic cookie lives at byte offset 4 of the packet. */
        pj_uint32_t magic = *(pj_uint32_t*)(buffer + 4);
        magic = pj_ntohl(magic);

        // Check if it is really a ZRTP packet, return, no further processing
        if (magic != ZRTP_MAGIC || zrtp->zrtpCtx == NULL) {
            return;
        }

        // cover the case if the other party sends _only_ ZRTP packets at the
        // beginning of a session. Start ZRTP in this case as well.
        if (!zrtp->started) {
            pjmedia_transport_zrtp_startZrtp((pjmedia_transport *)zrtp);
        }

        // this now points beyond the undefined and length field.
        // We need them, thus adjust
        unsigned char* zrtpMsg = (buffer + 12);

        // store peer's SSRC in host order, used when creating the CryptoContext
        zrtp->peerSSRC = *(pj_uint32_t*)(buffer + 8);
        zrtp->peerSSRC = pj_ntohl(zrtp->peerSSRC);
        zrtp_processZrtpMessage(zrtp->zrtpCtx, zrtpMsg, zrtp->peerSSRC, size);
    }
}
/* Put SRTP information (transport line and crypto attributes) into the
 * local SDP for the given media line, performing SDES offer/answer.
 *
 * Offerer side (sdp_remote == NULL): set the transport (RTP/AVP or
 * RTP/SAVP per the configured SRTP usage) and add one crypto attribute
 * per configured suite. Answerer side: validate the remote transport,
 * pick the first mutually supported crypto suite, store the negotiated
 * rx/tx policies, and answer with a matching crypto attribute. Finally
 * delegates to the member transport's encode_sdp.
 */
static pj_status_t transport_encode_sdp(pjmedia_transport *tp,
                                        pj_pool_t *sdp_pool,
                                        pjmedia_sdp_session *sdp_local,
                                        const pjmedia_sdp_session *sdp_remote,
                                        unsigned media_index)
{
    struct transport_srtp *srtp = (struct transport_srtp*) tp;
    pjmedia_sdp_media *m_rem, *m_loc;
    enum { MAXLEN = 512 };
    char buffer[MAXLEN];
    int buffer_len;
    pj_status_t status;
    pjmedia_sdp_attr *attr;
    pj_str_t attr_value;
    unsigned i, j;

    PJ_ASSERT_RETURN(tp && sdp_pool && sdp_local, PJ_EINVAL);

    /* Reset any previously negotiated policies before (re)negotiating. */
    pj_bzero(&srtp->rx_policy_neg, sizeof(srtp->rx_policy_neg));
    pj_bzero(&srtp->tx_policy_neg, sizeof(srtp->tx_policy_neg));

    srtp->offerer_side = sdp_remote == NULL;

    m_rem = sdp_remote ? sdp_remote->media[media_index] : NULL;
    m_loc = sdp_local->media[media_index];

    /* Bypass if media transport is not RTP/AVP or RTP/SAVP */
    if (pj_stricmp(&m_loc->desc.transport, &ID_RTP_AVP) != 0 &&
        pj_stricmp(&m_loc->desc.transport, &ID_RTP_SAVP) != 0)
        goto BYPASS_SRTP;

    /* If the media is inactive, do nothing. */
    /* No, we still need to process SRTP offer/answer even if the media is
     * marked as inactive, because the transport is still alive in this
     * case (e.g. for keep-alive). See:
     * http://trac.pjsip.org/repos/ticket/1079
     */
    /*
    if (pjmedia_sdp_media_find_attr(m_loc, &ID_INACTIVE, NULL) ||
        (m_rem && pjmedia_sdp_media_find_attr(m_rem, &ID_INACTIVE, NULL)))
        goto BYPASS_SRTP;
    */

    /* Check remote media transport & set local media transport
     * based on SRTP usage option.
     */
    if (srtp->offerer_side) {
        /* Generate transport */
        switch (srtp->setting.use) {
            case PJMEDIA_SRTP_DISABLED:
                goto BYPASS_SRTP;
            case PJMEDIA_SRTP_OPTIONAL:
                m_loc->desc.transport =
                    (srtp->peer_use == PJMEDIA_SRTP_MANDATORY)?
                     ID_RTP_SAVP : ID_RTP_AVP;
                break;
            case PJMEDIA_SRTP_MANDATORY:
                m_loc->desc.transport = ID_RTP_SAVP;
                break;
        }

        /* Generate crypto attribute if not yet */
        if (pjmedia_sdp_media_find_attr(m_loc, &ID_CRYPTO, NULL) == NULL) {
            for (i=0; i<srtp->setting.crypto_count; ++i) {
                /* Offer crypto-suites based on setting; tag is i+1. */
                buffer_len = MAXLEN;
                status = generate_crypto_attr_value(srtp->pool,
                                                    buffer, &buffer_len,
                                                    &srtp->setting.crypto[i],
                                                    i+1);
                if (status != PJ_SUCCESS)
                    return status;

                /* If buffer_len==0, just skip the crypto attribute. */
                if (buffer_len) {
                    pj_strset(&attr_value, buffer, buffer_len);
                    attr = pjmedia_sdp_attr_create(srtp->pool, ID_CRYPTO.ptr,
                                                   &attr_value);
                    m_loc->attr[m_loc->attr_count++] = attr;
                }
            }
        }
    } else {
        /* Answerer side */
        pj_assert(sdp_remote && m_rem);

        /* Generate transport */
        switch (srtp->setting.use) {
            case PJMEDIA_SRTP_DISABLED:
                /* Remote insists on SAVP while we have SRTP disabled. */
                if (pj_stricmp(&m_rem->desc.transport, &ID_RTP_SAVP) == 0)
                    return PJMEDIA_SRTP_ESDPINTRANSPORT;
                goto BYPASS_SRTP;
            case PJMEDIA_SRTP_OPTIONAL:
                /* Mirror whatever transport the remote offered. */
                m_loc->desc.transport = m_rem->desc.transport;
                break;
            case PJMEDIA_SRTP_MANDATORY:
                if (pj_stricmp(&m_rem->desc.transport, &ID_RTP_SAVP) != 0)
                    return PJMEDIA_SRTP_ESDPINTRANSPORT;
                m_loc->desc.transport = ID_RTP_SAVP;
                break;
        }

        /* Generate crypto attribute if not yet */
        if (pjmedia_sdp_media_find_attr(m_loc, &ID_CRYPTO, NULL) == NULL) {
            pjmedia_srtp_crypto tmp_rx_crypto;
            pj_bool_t has_crypto_attr = PJ_FALSE;
            int matched_idx = -1;
            int chosen_tag = 0;
            int tags[64]; /* assume no more than 64 crypto attrs in a media */
            unsigned cr_attr_count = 0;

            /* Find supported crypto-suite, get the tag, and assign
             * policy_local */
            for (i=0; i<m_rem->attr_count; ++i) {
                if (pj_stricmp(&m_rem->attr[i]->name, &ID_CRYPTO) != 0)
                    continue;

                has_crypto_attr = PJ_TRUE;

                status = parse_attr_crypto(srtp->pool, m_rem->attr[i],
                                           &tmp_rx_crypto,
                                           &tags[cr_attr_count]);
                if (status != PJ_SUCCESS)
                    return status;

                /* Check duplicated tag */
                for (j=0; j<cr_attr_count; ++j) {
                    if (tags[j] == tags[cr_attr_count]) {
                        DEACTIVATE_MEDIA(sdp_pool, m_loc);
                        return PJMEDIA_SRTP_ESDPDUPCRYPTOTAG;
                    }
                }

                if (matched_idx == -1) {
                    /* lets see if the crypto-suite offered is supported */
                    for (j=0; j<srtp->setting.crypto_count; ++j)
                        if (pj_stricmp(&tmp_rx_crypto.name,
                                       &srtp->setting.crypto[j].name) == 0)
                        {
                            int cs_idx = get_crypto_idx(&tmp_rx_crypto.name);

                            /* Force to use test key */
                            /* bad keys for snom: */
                            //char *hex_test_key = "58b29c5c8f42308120ce857e439f2d"
                            //                     "7810a8b10ad0b1446be5470faea496";
                            //char *hex_test_key = "20a26aac7ba062d356ff52b61e3993"
                            //                     "ccb78078f12c64db94b9c294927fd0";
                            //pj_str_t *test_key = &srtp->setting.crypto[j].key;
                            //char  *raw_test_key = pj_pool_zalloc(srtp->pool, 64);
                            //hex_string_to_octet_string(
                            //      raw_test_key,
                            //      hex_test_key,
                            //      strlen(hex_test_key));
                            //pj_strset(test_key, raw_test_key,
                            //          crypto_suites[cs_idx].cipher_key_len);
                            /* EO Force to use test key */

                            if (tmp_rx_crypto.key.slen !=
                                (int)crypto_suites[cs_idx].cipher_key_len)
                                return PJMEDIA_SRTP_EINKEYLEN;

                            /* First match wins: remember rx policy, the
                             * offer's tag, and our matching suite index. */
                            srtp->rx_policy_neg = tmp_rx_crypto;
                            chosen_tag = tags[cr_attr_count];
                            matched_idx = j;
                            break;
                        }
                }
                cr_attr_count++;
            }

            /* Check crypto negotiation result */
            switch (srtp->setting.use) {
                case PJMEDIA_SRTP_DISABLED:
                    pj_assert(!"Should never reach here");
                    break;

                case PJMEDIA_SRTP_OPTIONAL:
                    /* bypass SRTP when no crypto-attr and remote uses RTP/AVP */
                    if (!has_crypto_attr &&
                        pj_stricmp(&m_rem->desc.transport, &ID_RTP_AVP) == 0)
                        goto BYPASS_SRTP;
                    /* bypass SRTP when nothing match and remote uses RTP/AVP */
                    else if (matched_idx == -1 &&
                        pj_stricmp(&m_rem->desc.transport, &ID_RTP_AVP) == 0)
                        goto BYPASS_SRTP;
                    break;

                case PJMEDIA_SRTP_MANDATORY:
                    /* Do nothing, intentional */
                    break;
            }

            /* No crypto attr */
            if (!has_crypto_attr) {
                DEACTIVATE_MEDIA(sdp_pool, m_loc);
                return PJMEDIA_SRTP_ESDPREQCRYPTO;
            }

            /* No crypto match */
            if (matched_idx == -1) {
                DEACTIVATE_MEDIA(sdp_pool, m_loc);
                return PJMEDIA_SRTP_ENOTSUPCRYPTO;
            }

            /* we have to generate crypto answer,
             * with srtp->tx_policy_neg matched the offer
             * and rem_tag contains matched offer tag.
             */
            buffer_len = MAXLEN;
            status = generate_crypto_attr_value(srtp->pool,
                                                buffer, &buffer_len,
                                                &srtp->setting.crypto[matched_idx],
                                                chosen_tag);
            if (status != PJ_SUCCESS)
                return status;

            srtp->tx_policy_neg = srtp->setting.crypto[matched_idx];

            /* If buffer_len==0, just skip the crypto attribute. */
            if (buffer_len) {
                pj_strset(&attr_value, buffer, buffer_len);
                attr = pjmedia_sdp_attr_create(sdp_pool, ID_CRYPTO.ptr,
                                               &attr_value);
                m_loc->attr[m_loc->attr_count++] = attr;
            }

            /* At this point, we get valid rx_policy_neg & tx_policy_neg. */
        }
    }
    goto PROPAGATE_MEDIA_CREATE;

BYPASS_SRTP:
    /* Do not update this flag here as actually the media session hasn't been
     * updated.
     */
    //srtp->bypass_srtp = PJ_TRUE;

PROPAGATE_MEDIA_CREATE:
    return pjmedia_transport_encode_sdp(srtp->member_tp, sdp_pool,
                                        sdp_local, sdp_remote, media_index);
}
/*
 * Convert IPv4/IPv6 address to text.
 *
 * src is a binary in_addr (4 bytes) or in6_addr (16 bytes) depending on
 * af; the printable form is written to dst (at most size bytes). dst is
 * always NUL-initialized up front so it is an empty string on failure.
 * Three build-time implementations: native inet_ntop(), Windows
 * WSAAddressToString(), or an error stub when IPv6 support is absent.
 */
PJ_DEF(pj_status_t) pj_inet_ntop(int af, const void *src,
                                 char *dst, int size)
{
    PJ_ASSERT_RETURN(src && dst && size, PJ_EINVAL);

    *dst = '\0';

    PJ_ASSERT_RETURN(af==PJ_AF_INET || af==PJ_AF_INET6, PJ_EAFNOTSUP);

#if defined(PJ_SOCK_HAS_INET_NTOP) && PJ_SOCK_HAS_INET_NTOP != 0
    /*
     * Implementation using inet_ntop()
     */
    if (inet_ntop(af, src, dst, size) == NULL) {
        pj_status_t status = pj_get_netos_error();
        if (status == PJ_SUCCESS)
            status = PJ_EUNKNOWN;

        return status;
    }

    return PJ_SUCCESS;

#elif defined(PJ_WIN32) || defined(PJ_WIN64) || defined(PJ_WIN32_WINCE)
    /*
     * Implementation on Windows, using WSAAddressToString().
     * Should also work on Unicode systems.
     */
    {
        PJ_DECL_UNICODE_TEMP_BUF(wtempaddr,PJ_INET6_ADDRSTRLEN)
        pj_sockaddr sock_addr;
        DWORD addr_len, addr_str_len;
        int rc;

        /* Build a full sockaddr since WSAAddressToString() takes one,
         * not a raw address. */
        pj_bzero(&sock_addr, sizeof(sock_addr));
        sock_addr.addr.sa_family = (pj_uint16_t)af;
        if (af == PJ_AF_INET) {
            if (size < PJ_INET_ADDRSTRLEN)
                return PJ_ETOOSMALL;
            pj_memcpy(&sock_addr.ipv4.sin_addr, src, 4);
            addr_len = sizeof(pj_sockaddr_in);
            addr_str_len = PJ_INET_ADDRSTRLEN;
        } else if (af == PJ_AF_INET6) {
            if (size < PJ_INET6_ADDRSTRLEN)
                return PJ_ETOOSMALL;
            pj_memcpy(&sock_addr.ipv6.sin6_addr, src, 16);
            addr_len = sizeof(pj_sockaddr_in6);
            addr_str_len = PJ_INET6_ADDRSTRLEN;
        } else {
            pj_assert(!"Unsupported address family");
            return PJ_EAFNOTSUP;
        }

#if PJ_NATIVE_STRING_IS_UNICODE
        rc = WSAAddressToString((LPSOCKADDR)&sock_addr, addr_len,
                                NULL, wtempaddr, &addr_str_len);
        if (rc == 0) {
            pj_unicode_to_ansi(wtempaddr, wcslen(wtempaddr), dst, size);
        }
#else
        rc = WSAAddressToString((LPSOCKADDR)&sock_addr, addr_len,
                                NULL, dst, &addr_str_len);
#endif

        if (rc != 0) {
            pj_status_t status = pj_get_netos_error();
            if (status == PJ_SUCCESS)
                status = PJ_EUNKNOWN;

            return status;
        }

        return PJ_SUCCESS;
    }

#elif !defined(PJ_HAS_IPV6) || PJ_HAS_IPV6==0
    /* IPv6 support is disabled, just return error without raising assertion */
    return PJ_EIPV6NOTSUP;
#else
    pj_assert(!"Not supported");
    return PJ_EIPV6NOTSUP;
#endif
}
/* Print a SIP/SIPS URI into buf according to the printing context
 * (certain parameters are forbidden in From/To, routing, or Contact
 * headers). Returns the number of characters written, or -1 if the
 * buffer is too small.
 *
 * Fix: the user-param / method-param section had been corrupted by a
 * secret-redaction pass ("******" residue); reconstructed to print
 * ";user=" in all contexts and ";method=" only in PJSIP_URI_IN_OTHER
 * context, matching the surrounding copy_advance_pair usage.
 */
static pj_ssize_t pjsip_url_print( pjsip_uri_context_e context,
                                   const pjsip_sip_uri *url,
                                   char *buf, pj_size_t size)
{
    int printed;
    char *startbuf = buf;
    char *endbuf = buf+size;
    const pj_str_t *scheme;
    *buf = '\0';

    /* Print scheme ("sip:" or "sips:") */
    scheme = pjsip_uri_get_scheme(url);
    copy_advance_check(buf, *scheme);
    *buf++ = ':';

    /* Print "user:password@", if any. */
    if (url->user.slen) {
        copy_advance_escape(buf, url->user, pjsip_USER_SPEC);
        if (url->passwd.slen) {
            *buf++ = ':';
            copy_advance_escape(buf, url->passwd, pjsip_PASSWD_SPEC);
        }

        *buf++ = '@';
    }

    /* Print host. */
    pj_assert(url->host.slen != 0);
    copy_advance_check(buf, url->host);

    /* Only print port if it is explicitly specified.
     * Port is not allowed in To and From header.
     */
    /* Unfortunately some UA requires us to send back the port
     * number exactly as it was sent. We don't remember whether an
     * UA has sent us port, so we'll just send the port indiscrimately
     */
    //PJ_TODO(SHOULD_DISALLOW_URI_PORT_IN_FROM_TO_HEADER)
    if (url->port && context != PJSIP_URI_IN_FROMTO_HDR) {
        if (endbuf - buf < 10)
            return -1;

        *buf++ = ':';
        printed = pj_utoa(url->port, buf);
        buf += printed;
    }

    /* User param is allowed in all contexes */
    copy_advance_pair_check(buf, ";user=", 6, url->user_param,
                            pjsip_PARAM_CHAR_SPEC);

    /* Method param is only allowed in external/other context. */
    if (context == PJSIP_URI_IN_OTHER) {
        copy_advance_pair_escape(buf, ";method=", 8, url->method_param,
                                 pjsip_PARAM_CHAR_SPEC);
    }

    /* Transport is not allowed in From/To header. */
    if (context != PJSIP_URI_IN_FROMTO_HDR) {
        copy_advance_pair_escape(buf, ";transport=", 11,
                                 url->transport_param,
                                 pjsip_PARAM_CHAR_SPEC);
    }

    /* TTL param is not allowed in From, To, Route, and Record-Route header. */
    if (url->ttl_param >= 0 && context != PJSIP_URI_IN_FROMTO_HDR &&
        context != PJSIP_URI_IN_ROUTING_HDR)
    {
        if (endbuf - buf < 15)
            return -1;
        pj_memcpy(buf, ";ttl=", 5);
        printed = pj_utoa(url->ttl_param, buf+5);
        buf += printed + 5;
    }

    /* maddr param is not allowed in From and To header. */
    if (context != PJSIP_URI_IN_FROMTO_HDR) {
        copy_advance_pair_escape(buf, ";maddr=", 7, url->maddr_param,
                                 pjsip_PARAM_CHAR_SPEC);
    }

    /* lr param is not allowed in From, To, and Contact header. */
    if (url->lr_param && context != PJSIP_URI_IN_FROMTO_HDR &&
        context != PJSIP_URI_IN_CONTACT_HDR)
    {
        pj_str_t lr = { ";lr", 3 };
        if (endbuf - buf < 3)
            return -1;
        copy_advance_check(buf, lr);
    }

    /* Other param. */
    printed = pjsip_param_print_on(&url->other_param, buf, endbuf-buf,
                                   &pjsip_PARAM_CHAR_SPEC,
                                   &pjsip_PARAM_CHAR_SPEC, ';');
    if (printed < 0)
        return -1;
    buf += printed;

    /* Header param.
     * Header param is only allowed in these contexts:
     *  - PJSIP_URI_IN_CONTACT_HDR
     *  - PJSIP_URI_IN_OTHER
     */
    if (context == PJSIP_URI_IN_CONTACT_HDR ||
        context == PJSIP_URI_IN_OTHER)
    {
        printed = pjsip_param_print_on(&url->header_param, buf, endbuf-buf,
                                       &pjsip_HDR_CHAR_SPEC,
                                       &pjsip_HDR_CHAR_SPEC, '?');
        if (printed < 0)
            return -1;
        buf += printed;
    }

    *buf = '\0';

    return buf-startbuf;
}
/*
 * Release write lock.
 *
 * Releasing the writer's hold is a single post on the write-lock
 * semaphore.
 */
PJ_DEF(pj_status_t) pj_rwmutex_unlock_write(pj_rwmutex_t *mutex)
{
    PJ_ASSERT_RETURN(mutex != NULL, PJ_EINVAL);

    /* NOTE(review): asserting reader_count on a *write* unlock looks
     * surprising; presumably a writer may hold at most one reader slot --
     * confirm against pj_rwmutex_lock_write(). */
    pj_assert(mutex->reader_count <= 1);

    return pj_sem_post(mutex->write_lock);
}
/*
 * Convert text to IPv4/IPv6 address.
 *
 * src need not be NUL-terminated (it is a pj_str_t); it is copied into
 * a local buffer first. dst receives 4 bytes for PJ_AF_INET or 16 bytes
 * for PJ_AF_INET6. For IPv4, dst is pre-set to PJ_INADDR_NONE so that a
 * failed parse leaves the pj_inet_aton()-compatible sentinel behind.
 */
PJ_DEF(pj_status_t) pj_inet_pton(int af, const pj_str_t *src, void *dst)
{
    char tempaddr[PJ_INET6_ADDRSTRLEN];

    PJ_ASSERT_RETURN(af==PJ_AF_INET || af==PJ_AF_INET6, PJ_EAFNOTSUP);
    PJ_ASSERT_RETURN(src && src->slen && dst, PJ_EINVAL);

    /* Initialize output with PJ_IN_ADDR_NONE for IPv4 (to be
     * compatible with pj_inet_aton()
     */
    if (af==PJ_AF_INET) {
        ((pj_in_addr*)dst)->s_addr = PJ_INADDR_NONE;
    }

    /* Caution:
     *  this function might be called with cp->slen >= 46
     *  (i.e. when called with hostname to check if it's an IP addr).
     */
    if (src->slen >= PJ_INET6_ADDRSTRLEN) {
        return PJ_ENAMETOOLONG;
    }

    /* Make a NUL-terminated copy for the native parsing API. */
    pj_memcpy(tempaddr, src->ptr, src->slen);
    tempaddr[src->slen] = '\0';

#if defined(PJ_SOCK_HAS_INET_PTON) && PJ_SOCK_HAS_INET_PTON != 0
    /*
     * Implementation using inet_pton()
     */
    if (inet_pton(af, tempaddr, dst) != 1) {
        pj_status_t status = pj_get_netos_error();
        if (status == PJ_SUCCESS)
            status = PJ_EUNKNOWN;

        return status;
    }

    return PJ_SUCCESS;

#elif defined(PJ_WIN32) || defined(PJ_WIN64) || defined(PJ_WIN32_WINCE)
    /*
     * Implementation on Windows, using WSAStringToAddress().
     * Should also work on Unicode systems.
     */
    {
        PJ_DECL_UNICODE_TEMP_BUF(wtempaddr,PJ_INET6_ADDRSTRLEN)
        pj_sockaddr sock_addr;
        int addr_len = sizeof(sock_addr);
        int rc;

        sock_addr.addr.sa_family = (pj_uint16_t)af;
        rc = WSAStringToAddress(
                PJ_STRING_TO_NATIVE(tempaddr,wtempaddr,sizeof(wtempaddr)),
                af, NULL, (LPSOCKADDR)&sock_addr, &addr_len);
        if (rc != 0) {
            /* If you get rc 130022 Invalid argument (WSAEINVAL) with IPv6,
             * check that you have IPv6 enabled (install it in the network
             * adapter).
             */
            pj_status_t status = pj_get_netos_error();
            if (status == PJ_SUCCESS)
                status = PJ_EUNKNOWN;

            return status;
        }

        /* Extract just the raw address bytes from the parsed sockaddr. */
        if (sock_addr.addr.sa_family == PJ_AF_INET) {
            pj_memcpy(dst, &sock_addr.ipv4.sin_addr, 4);
            return PJ_SUCCESS;
        }
        else if (sock_addr.addr.sa_family == PJ_AF_INET6) {
            pj_memcpy(dst, &sock_addr.ipv6.sin6_addr, 16);
            return PJ_SUCCESS;
        }
        else {
            pj_assert(!"Shouldn't happen");
            return PJ_EBUG;
        }
    }
#elif !defined(PJ_HAS_IPV6) || PJ_HAS_IPV6==0
    /* IPv6 support is disabled, just return error without raising assertion */
    return PJ_EIPV6NOTSUP;
#else
    pj_assert(!"Not supported");
    return PJ_EIPV6NOTSUP;
#endif
}
/* Verify that every address-producing API leaves the sockaddr "sa_len"
 * member zeroed, on platforms whose sockaddr has one
 * (PJ_SOCKADDR_HAS_LEN). Returns 0 on success, a distinct negative
 * code identifying the failing API otherwise (via CHECK_SA_ZERO_LEN).
 * On platforms without sa_len this is a no-op returning 0.
 */
static int purity_test(void)
{
    PJ_LOG(3,("test", "...purity_test()"));

#if defined(PJ_SOCKADDR_HAS_LEN) && PJ_SOCKADDR_HAS_LEN!=0
    /* Check on "sin_len" member of sockaddr */
    {
        const pj_str_t str_ip = {"1.1.1.1", 7};
        pj_sockaddr addr[16];
        pj_addrinfo ai[16];
        unsigned cnt;
        pj_status_t rc;

        /* pj_enum_ip_interface() */
        cnt = PJ_ARRAY_SIZE(addr);
        rc = pj_enum_ip_interface(pj_AF_UNSPEC(), &cnt, addr);
        if (rc == PJ_SUCCESS) {
            while (cnt--)
                CHECK_SA_ZERO_LEN(&addr[cnt], -10);
        }

        /* pj_gethostip() on IPv4 */
        rc = pj_gethostip(pj_AF_INET(), &addr[0]);
        if (rc == PJ_SUCCESS)
            CHECK_SA_ZERO_LEN(&addr[0], -20);

        /* pj_gethostip() on IPv6 */
        rc = pj_gethostip(pj_AF_INET6(), &addr[0]);
        if (rc == PJ_SUCCESS)
            CHECK_SA_ZERO_LEN(&addr[0], -30);

        /* pj_getdefaultipinterface() on IPv4 */
        rc = pj_getdefaultipinterface(pj_AF_INET(), &addr[0]);
        if (rc == PJ_SUCCESS)
            CHECK_SA_ZERO_LEN(&addr[0], -40);

        /* pj_getdefaultipinterface() on IPv6 */
        rc = pj_getdefaultipinterface(pj_AF_INET6(), &addr[0]);
        if (rc == PJ_SUCCESS)
            CHECK_SA_ZERO_LEN(&addr[0], -50);

        /* pj_getaddrinfo() on a host name */
        cnt = PJ_ARRAY_SIZE(ai);
        rc = pj_getaddrinfo(pj_AF_UNSPEC(), pj_gethostname(), &cnt, ai);
        if (rc == PJ_SUCCESS) {
            while (cnt--)
                CHECK_SA_ZERO_LEN(&ai[cnt].ai_addr, -60);
        }

        /* pj_getaddrinfo() on an IP address */
        cnt = PJ_ARRAY_SIZE(ai);
        rc = pj_getaddrinfo(pj_AF_UNSPEC(), &str_ip, &cnt, ai);
        if (rc == PJ_SUCCESS) {
            pj_assert(cnt == 1);
            CHECK_SA_ZERO_LEN(&ai[0].ai_addr, -70);
        }
    }
#endif

    return 0;
}
/*
 * parse_test()
 * Exercise pj_sockaddr_parse() against a table of valid inputs (each
 * parse result is compared against an address built independently with
 * pj_sockaddr_init()) and a table of inputs that must be rejected.
 * Returns zero on success, a negative code identifying the first
 * failing check otherwise.
 */
static int parse_test(void)
{
/* Table shorthands; remapped to the real PJ_AF_* values inside the loops. */
#define IPv4	1
#define IPv6	2

    struct test_t {
	const char *input;
	int result_af;
	const char *result_ip;
	pj_uint16_t result_port;
    };
    struct test_t valid_tests[] =
    {
	/* IPv4 */
	{ "10.0.0.1:80", IPv4, "10.0.0.1", 80},
	{ "10.0.0.1", IPv4, "10.0.0.1", 0},
	{ "10.0.0.1:", IPv4, "10.0.0.1", 0},
	{ "10.0.0.1:0", IPv4, "10.0.0.1", 0},
	{ ":80", IPv4, "0.0.0.0", 80},
	{ ":", IPv4, "0.0.0.0", 0},
#if !PJ_SYMBIAN
	{ "localhost", IPv4, "127.0.0.1", 0},
	{ "localhost:", IPv4, "127.0.0.1", 0},
	{ "localhost:80", IPv4, "127.0.0.1", 80},
#endif
#if defined(PJ_HAS_IPV6) && PJ_HAS_IPV6
	/* Note: without brackets the trailing ":80" is part of the
	 * IPv6 address itself, not a port number.
	 */
	{ "fe::01:80", IPv6, "fe::01:80", 0},
	{ "[fe::01]:80", IPv6, "fe::01", 80},
	{ "fe::01", IPv6, "fe::01", 0},
	{ "[fe::01]", IPv6, "fe::01", 0},
	{ "fe::01:", IPv6, "fe::01", 0},
	{ "[fe::01]:", IPv6, "fe::01", 0},
	{ "::", IPv6, "::0", 0},
	{ "[::]", IPv6, "::", 0},
	{ ":::", IPv6, "::", 0},
	{ "[::]:", IPv6, "::", 0},
	{ ":::80", IPv6, "::", 80},
	{ "[::]:80", IPv6, "::", 80},
#endif
    };
    struct test_t invalid_tests[] =
    {
	/* IPv4 */
	{ "10.0.0.1:abcd", IPv4},   /* port not numeric */
	{ "10.0.0.1:-1", IPv4},	    /* port contains illegal character */
	{ "10.0.0.1:123456", IPv4}, /* port too big	*/
	{ "1.2.3.4.5:80", IPv4},    /* invalid IP */
	{ "10:0:80", IPv4},	    /* hostname has colon */
#if defined(PJ_HAS_IPV6) && PJ_HAS_IPV6
	{ "[fe::01]:abcd", IPv6},   /* port not numeric */
	{ "[fe::01]:-1", IPv6},	    /* port contains illegal character */
	{ "[fe::01]:123456", IPv6}, /* port too big	*/
	{ "fe::01:02::03:04:80", IPv6},	    /* invalid IP */
	{ "[fe::01:02::03:04]:80", IPv6},   /* invalid IP */
	{ "[fe:01", IPv6},	    /* Unterminated bracket */
#endif
    };

    unsigned i;

    PJ_LOG(3,("test", "...IP address parsing"));

    for (i=0; i<PJ_ARRAY_SIZE(valid_tests); ++i) {
	pj_status_t status;
	pj_str_t input;
	pj_sockaddr addr, result;

	/* Remap the table's IPv4/IPv6 tag to the real address family.
	 * Note: this mutates the table in place, which is fine since
	 * each entry is visited exactly once.
	 */
	switch (valid_tests[i].result_af) {
	case IPv4:
	    valid_tests[i].result_af = PJ_AF_INET;
	    break;
	case IPv6:
	    valid_tests[i].result_af = PJ_AF_INET6;
	    break;
	default:
	    pj_assert(!"Invalid AF!");
	    continue;
	}

	/* Try parsing with PJ_AF_UNSPEC */
	status = pj_sockaddr_parse(PJ_AF_UNSPEC, 0,
				   pj_cstr(&input, valid_tests[i].input),
				   &addr);
	if (status != PJ_SUCCESS) {
	    PJ_LOG(1,("test", ".... failed when parsing %s (i=%d)",
		      valid_tests[i].input, i));
	    return -10;
	}

	/* Check "sin_len" member of parse result */
	CHECK_SA_ZERO_LEN(&addr, -20);

	/* Build the correct result */
	status = pj_sockaddr_init(valid_tests[i].result_af,
				  &result,
				  pj_cstr(&input, valid_tests[i].result_ip),
				  valid_tests[i].result_port);
	if (status != PJ_SUCCESS) {
	    PJ_LOG(1,("test", ".... error building IP address %s",
		      valid_tests[i].input));
	    return -30;
	}

	/* Compare the result */
	if (pj_sockaddr_cmp(&addr, &result) != 0) {
	    PJ_LOG(1,("test", ".... parsed result mismatched for %s",
		      valid_tests[i].input));
	    return -40;
	}

	/* Parse again with the specified af */
	status = pj_sockaddr_parse(valid_tests[i].result_af, 0,
				   pj_cstr(&input, valid_tests[i].input),
				   &addr);
	if (status != PJ_SUCCESS) {
	    PJ_LOG(1,("test", ".... failed when parsing %s",
		      valid_tests[i].input));
	    return -50;
	}

	/* Check "sin_len" member of parse result */
	CHECK_SA_ZERO_LEN(&addr, -55);

	/* Compare the result again */
	if (pj_sockaddr_cmp(&addr, &result) != 0) {
	    PJ_LOG(1,("test", ".... parsed result mismatched for %s",
		      valid_tests[i].input));
	    return -60;
	}
    }

    for (i=0; i<PJ_ARRAY_SIZE(invalid_tests); ++i) {
	pj_status_t status;
	pj_str_t input;
	pj_sockaddr addr;

	/* Remap the table's IPv4/IPv6 tag to the real address family. */
	switch (invalid_tests[i].result_af) {
	case IPv4:
	    invalid_tests[i].result_af = PJ_AF_INET;
	    break;
	case IPv6:
	    invalid_tests[i].result_af = PJ_AF_INET6;
	    break;
	default:
	    pj_assert(!"Invalid AF!");
	    continue;
	}

	/* Try parsing with PJ_AF_UNSPEC; any success is a test failure. */
	status = pj_sockaddr_parse(PJ_AF_UNSPEC, 0,
				   pj_cstr(&input, invalid_tests[i].input),
				   &addr);
	if (status == PJ_SUCCESS) {
	    PJ_LOG(1,("test", ".... expecting failure when parsing %s",
		      invalid_tests[i].input));
	    return -100;
	}
    }

    return 0;
}
/*
 * pj_init(void).
 * Init PJLIB!
 *
 * One-time library initialization for the POSIX port: thread TLS,
 * global critical section, logging, the pool's no-memory exception ID,
 * the GUID generator and the high-resolution timestamp.  The call is
 * reference counted: repeated calls just bump the counter and each
 * should be balanced by pj_shutdown().
 * Initialization order is significant (see comments below).
 */
PJ_DEF(pj_status_t) pj_init(void)
{
    char dummy_guid[PJ_GUID_MAX_LENGTH];
    pj_str_t guid;
    pj_status_t rc;

    /* Check if PJLIB have been initialized */
    if (initialized) {
	++initialized;
	return PJ_SUCCESS;
    }

#if PJ_HAS_THREADS
    /* Init this thread's TLS. */
    if ((rc=pj_thread_init()) != 0) {
	return rc;
    }

    /* Critical section. */
    if ((rc=init_mutex(&critical_section, "critsec", PJ_MUTEX_RECURSE)) != 0)
	return rc;

#endif

    /* Init logging */
    pj_log_init();

    /* Initialize exception ID for the pool.
     * Must do so after critical section is configured.
     */
    rc = pj_exception_id_alloc("PJLIB/No memory", &PJ_NO_MEMORY_EXCEPTION);
    if (rc != PJ_SUCCESS)
        return rc;

    /* Init random seed. */
    /* Or probably not. Let application in charge of this */
    /* pj_srand( clock() ); */

    /* Startup GUID.
     * First call primes the generator's internal state.
     */
    guid.ptr = dummy_guid;
    pj_generate_unique_string( &guid );

    /* Startup timestamp */
#if defined(PJ_HAS_HIGH_RES_TIMER) && PJ_HAS_HIGH_RES_TIMER != 0
    {
	pj_timestamp dummy_ts;
	/* Probe the timestamp API once so later calls can't fail unnoticed. */
	if ((rc=pj_get_timestamp(&dummy_ts)) != 0) {
	    return rc;
	}
    }
#endif

    /* Flag PJLIB as initialized */
    ++initialized;
    pj_assert(initialized == 1);

    PJ_LOG(4,(THIS_FILE, "pjlib %s for POSIX initialized",
	      PJ_VERSION));

    return PJ_SUCCESS;
}
/*
 * pj_thread_create(...)
 *
 * Create a new thread running 'proc(arg)'.  The thread record and its
 * name are allocated from 'pool' (the caller keeps the pool alive for
 * the thread's lifetime).  PJ_THREAD_SUSPENDED is emulated with a mutex
 * that the thread must acquire before running (released by
 * pj_thread_resume() elsewhere in this file — TODO confirm against
 * caller).  Returns PJ_SUCCESS and stores the handle in *ptr_thread,
 * or an OS error.
 */
PJ_DEF(pj_status_t) pj_thread_create( pj_pool_t *pool,
				      const char *thread_name,
				      pj_thread_proc *proc,
				      void *arg,
				      pj_size_t stack_size,
				      unsigned flags,
				      pj_thread_t **ptr_thread)
{
#if PJ_HAS_THREADS
    pj_thread_t *rec;
    pthread_attr_t thread_attr;
    void *stack_addr;
    int rc;

    PJ_UNUSED_ARG(stack_addr);

    PJ_CHECK_STACK();
    PJ_ASSERT_RETURN(pool && proc && ptr_thread, PJ_EINVAL);

    /* Create thread record and assign name for the thread */
    rec = (struct pj_thread_t*) pj_pool_zalloc(pool, sizeof(pj_thread_t));
    PJ_ASSERT_RETURN(rec, PJ_ENOMEM);

    /* Set name.  A '%' in the name is treated as a printf format with
     * the record's address as argument (e.g. the default "thr%p").
     */
    if (!thread_name)
	thread_name = "thr%p";

    if (strchr(thread_name, '%')) {
	pj_ansi_snprintf(rec->obj_name, PJ_MAX_OBJ_NAME, thread_name, rec);
    } else {
	strncpy(rec->obj_name, thread_name, PJ_MAX_OBJ_NAME);
	/* strncpy does not guarantee termination; force it. */
	rec->obj_name[PJ_MAX_OBJ_NAME-1] = '\0';
    }

    /* Set default stack size */
    if (stack_size == 0)
	stack_size = PJ_THREAD_DEFAULT_STACK_SIZE;

#if defined(PJ_OS_HAS_CHECK_STACK) && PJ_OS_HAS_CHECK_STACK!=0
    rec->stk_size = stack_size;
    rec->stk_max_usage = 0;
#endif

    /* Emulate suspended thread with mutex: the new thread is expected to
     * block on this locked mutex until it is resumed.
     */
    if (flags & PJ_THREAD_SUSPENDED) {
	rc = pj_mutex_create_simple(pool, NULL, &rec->suspended_mutex);
	if (rc != PJ_SUCCESS) {
	    return rc;
	}

	pj_mutex_lock(rec->suspended_mutex);
    } else {
	pj_assert(rec->suspended_mutex == NULL);
    }


    /* Init thread attributes */
    pthread_attr_init(&thread_attr);

#if defined(PJ_THREAD_SET_STACK_SIZE) && PJ_THREAD_SET_STACK_SIZE!=0
    /* Set thread's stack size */
    rc = pthread_attr_setstacksize(&thread_attr, stack_size);
    if (rc != 0)
	return PJ_RETURN_OS_ERROR(rc);
#endif	/* PJ_THREAD_SET_STACK_SIZE */


#if defined(PJ_THREAD_ALLOCATE_STACK) && PJ_THREAD_ALLOCATE_STACK!=0
    /* Allocate memory for the stack (from the caller's pool). */
    stack_addr = pj_pool_alloc(pool, stack_size);
    PJ_ASSERT_RETURN(stack_addr, PJ_ENOMEM);

    rc = pthread_attr_setstackaddr(&thread_attr, stack_addr);
    if (rc != 0)
	return PJ_RETURN_OS_ERROR(rc);
#endif	/* PJ_THREAD_ALLOCATE_STACK */


    /* Create the thread.  thread_main is the internal trampoline that
     * invokes rec->proc(rec->arg).
     */
    rec->proc = proc;
    rec->arg = arg;
    rc = pthread_create( &rec->thread, &thread_attr, &thread_main, rec);
    if (rc != 0) {
	return PJ_RETURN_OS_ERROR(rc);
    }

    *ptr_thread = rec;

    PJ_LOG(6, (rec->obj_name, "Thread created"));
    return PJ_SUCCESS;
#else
    pj_assert(!"Threading is disabled!");
    return PJ_EINVALIDOP;
#endif
}
static const pj_str_t *pjsip_name_addr_get_scheme(const pjsip_name_addr *name) { pj_assert(name->uri != NULL); return pjsip_uri_get_scheme(name->uri); }
/*
 * Get the application user data attached to the ZRTP transport's user
 * callback structure.
 *
 * @param tp	The ZRTP media transport.
 *
 * @return	The user data pointer, or NULL if tp is NULL or no user
 *		callback has been registered.
 */
PJ_DEF(void* )pjmedia_transport_zrtp_getUserData(pjmedia_transport *tp)
{
    struct tp_zrtp *zrtp = (struct tp_zrtp*)tp;

    /* The original code only pj_assert()-ed tp, which compiles away in
     * release builds; also userCallback was dereferenced unchecked and
     * would crash when no callback had been set.  Guard both.
     */
    PJ_ASSERT_RETURN(tp != NULL, NULL);

    if (zrtp->userCallback == NULL)
	return NULL;

    return zrtp->userCallback->userData;
}
/*
 * fake_udp_on_read_complete()
 *
 * This is callback notification from ioqueue that a pending recvfrom()
 * operation has completed.
 *
 * The loop below keeps processing packets that recvfrom() returns
 * immediately; after MAX_IMMEDIATE_PACKET iterations it forces the
 * operation to complete asynchronously so other sockets get a chance.
 *
 * Fixes vs. original:
 *  - 'size_eaten' was declared pj_size_t (unsigned), so the
 *    (size_eaten < 0) error check could never fire; it is now
 *    pj_ssize_t, matching pjsip_tpmgr_receive_packet()'s return type.
 *  - the "small packet" branch tested only (bytes_read <= MIN_SIZE),
 *    which also swallowed negative (error) values and made the
 *    error-reporting branch unreachable; it now requires
 *    bytes_read >= 0.
 */
static void fake_udp_on_read_complete( pj_ioqueue_key_t *key,
				       pj_ioqueue_op_key_t *op_key,
				       pj_ssize_t bytes_read)
{
    /* See https://trac.pjsip.org/repos/ticket/1197 */
    enum { MAX_IMMEDIATE_PACKET = 50 };
    pjsip_rx_data_op_key *rdata_op_key = (pjsip_rx_data_op_key*) op_key;
    pjsip_rx_data *rdata = rdata_op_key->rdata;
    struct fake_udp_transport *tp =
	(struct fake_udp_transport*)rdata->tp_info.transport;
    int i;
    pj_status_t status;

    /* Don't do anything if transport is closing. */
    if (tp->is_closing) {
	tp->is_closing++;
	return;
    }

    /* Don't do anything if transport is being paused. */
    if (tp->is_paused)
	return;

    /*
     * The idea of the loop is to process immediate data received by
     * pj_ioqueue_recvfrom(), as long as i < MAX_IMMEDIATE_PACKET. When
     * i is >= MAX_IMMEDIATE_PACKET, we force the recvfrom() operation to
     * complete asynchronously, to allow other sockets to get their data.
     */
    for (i=0;; ++i) {
	enum { MIN_SIZE = 32 };
	pj_uint32_t flags;

	/* Report the packet to transport manager. Only do so if packet size
	 * is relatively big enough for a SIP packet.
	 */
	if (bytes_read > MIN_SIZE) {
	    /* Must be signed: the manager may return a negative error. */
	    pj_ssize_t size_eaten;
	    const pj_sockaddr *src_addr = &rdata->pkt_info.src_addr;

	    /* Init pkt_info part. */
	    rdata->pkt_info.len = bytes_read;
	    rdata->pkt_info.zero = 0;
	    pj_gettimeofday(&rdata->pkt_info.timestamp);
	    pj_ansi_strcpy(rdata->pkt_info.src_name,
			   pj_inet_ntoa(src_addr->ipv4.sin_addr));
	    rdata->pkt_info.src_port = pj_ntohs(src_addr->ipv4.sin_port);

	    size_eaten =
		pjsip_tpmgr_receive_packet(rdata->tp_info.transport->tpmgr,
					   rdata);
	    if (size_eaten < 0) {
		pj_assert(!"It shouldn't happen!");
		size_eaten = rdata->pkt_info.len;
	    }

	    /* Since this is FAKE_UDP, the whole buffer is the message. */
	    rdata->pkt_info.len = 0;

	} else if (bytes_read >= 0 && bytes_read <= MIN_SIZE) {

	    /* TODO: */

	} else if (-bytes_read != PJ_STATUS_FROM_OS(OSERR_EWOULDBLOCK) &&
		   -bytes_read != PJ_STATUS_FROM_OS(OSERR_EINPROGRESS) &&
		   -bytes_read != PJ_STATUS_FROM_OS(OSERR_ECONNRESET))
	{
	    /* Report error to endpoint. */
	    PJSIP_ENDPT_LOG_ERROR((rdata->tp_info.transport->endpt,
				   rdata->tp_info.transport->obj_name,
				   -bytes_read,
				   "Warning: pj_ioqueue_recvfrom()"
				   " callback error"));
	}

	if (i >= MAX_IMMEDIATE_PACKET) {
	    /* Force ioqueue_recvfrom() to return PJ_EPENDING */
	    flags = PJ_IOQUEUE_ALWAYS_ASYNC;
	} else {
	    flags = 0;
	}

	/* Reset pool.
	 * Need to copy rdata fields to temp variable because they will
	 * be invalid after pj_pool_reset().
	 */
	{
	    pj_pool_t *rdata_pool = rdata->tp_info.pool;
	    struct fake_udp_transport *rdata_tp ;
	    unsigned rdata_index;

	    rdata_tp = (struct fake_udp_transport*)rdata->tp_info.transport;
	    rdata_index = (unsigned)(unsigned long)rdata->tp_info.tp_data;

	    pj_pool_reset(rdata_pool);
	    init_rdata(rdata_tp, rdata_index, rdata_pool, &rdata);

	    /* Change some vars to point to new location after
	     * pool reset.
	     */
	    op_key = &rdata->tp_info.op_key.op_key;
	}

	/* Only read next packet if transport is not being paused. This
	 * check handles the case where transport is paused while endpoint
	 * is still processing a SIP message.
	 */
	if (tp->is_paused)
	    return;

	/* Read next packet. */
	bytes_read = sizeof(rdata->pkt_info.packet);
	rdata->pkt_info.src_addr_len = sizeof(rdata->pkt_info.src_addr);
	status = pj_ioqueue_recvfrom(key, op_key,
				     rdata->pkt_info.packet,
				     &bytes_read, flags,
				     &rdata->pkt_info.src_addr,
				     &rdata->pkt_info.src_addr_len);

	if (status == PJ_SUCCESS) {
	    /* Continue loop. */
	    pj_assert(i < MAX_IMMEDIATE_PACKET);

	} else if (status == PJ_EPENDING) {
	    break;

	} else {
	    if (i < MAX_IMMEDIATE_PACKET) {
		/* Report error to endpoint if this is not EWOULDBLOCK error.*/
		if (status != PJ_STATUS_FROM_OS(OSERR_EWOULDBLOCK) &&
		    status != PJ_STATUS_FROM_OS(OSERR_EINPROGRESS) &&
		    status != PJ_STATUS_FROM_OS(OSERR_ECONNRESET))
		{
		    PJSIP_ENDPT_LOG_ERROR((rdata->tp_info.transport->endpt,
					   rdata->tp_info.transport->obj_name,
					   status,
					   "Warning: pj_ioqueue_recvfrom"));
		}
		/* Continue loop. */
		bytes_read = 0;
	    } else {
		/* This is fatal error.
		 * Ioqueue operation will stop for this transport!
		 */
		PJSIP_ENDPT_LOG_ERROR((rdata->tp_info.transport->endpt,
				       rdata->tp_info.transport->obj_name,
				       status,
				       "FATAL: pj_ioqueue_recvfrom() error, "
				       "FAKE_UDP transport stopping! Error"));
		break;
	    }
	}
    }
}
/*
 * pj_ioqueue_poll()
 *
 * Poll the epoll descriptor once (for at most 'timeout'), collect the
 * ready keys into a local 'queue' while holding the ioqueue lock, then
 * dispatch the events with the lock released.  Returns the number of
 * events processed, or a negative error.
 *
 * NOTE(review): each ready descriptor can enqueue up to three entries
 * (read, write, exception) into queue[], which is sized only
 * PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL — same as the events array — so
 * 'processed' could in principle overflow the array; confirm against
 * upstream whether a bound check on 'processed' is needed.
 */
PJ_DEF(int) pj_ioqueue_poll( pj_ioqueue_t *ioqueue, const pj_time_val *timeout)
{
    int i, count, processed;
    int msec;
    //struct epoll_event *events = ioqueue->events;
    //struct queue *queue = ioqueue->queue;
    struct epoll_event events[PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL];
    struct queue queue[PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL];
    pj_timestamp t1, t2;

    PJ_CHECK_STACK();

    /* NULL timeout means a long (9 s) wait, not infinite. */
    msec = timeout ? PJ_TIME_VAL_MSEC(*timeout) : 9000;

    TRACE_((THIS_FILE, "start os_epoll_wait, msec=%d", msec));
    pj_get_timestamp(&t1);

    //count = os_epoll_wait( ioqueue->epfd, events, ioqueue->max, msec);
    count = os_epoll_wait( ioqueue->epfd, events,
			   PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL, msec);
    if (count == 0) {
#if PJ_IOQUEUE_HAS_SAFE_UNREG
    /* Check the closing keys only when there's no activity and when there are
     * pending closing keys.
     */
    if (count == 0 && !pj_list_empty(&ioqueue->closing_list)) {
	pj_lock_acquire(ioqueue->lock);
	scan_closing_keys(ioqueue);
	pj_lock_release(ioqueue->lock);
    }
#endif
	TRACE_((THIS_FILE, "os_epoll_wait timed out"));
	return count;
    }
    else if (count < 0) {
	TRACE_((THIS_FILE, "os_epoll_wait error"));
	return -pj_get_netos_error();
    }

    pj_get_timestamp(&t2);
    TRACE_((THIS_FILE, "os_epoll_wait returns %d, time=%d usec",
		       count, pj_elapsed_usec(&t1, &t2)));

    /* Lock ioqueue. */
    pj_lock_acquire(ioqueue->lock);

    for (processed=0, i=0; i<count; ++i) {
	pj_ioqueue_key_t *h = (pj_ioqueue_key_t*)(epoll_data_type)
				events[i].epoll_data;

	TRACE_((THIS_FILE, "event %d: events=%d", i, events[i].events));

	/*
	 * Check readability.
	 */
	if ((events[i].events & EPOLLIN) &&
	    (key_has_pending_read(h) || key_has_pending_accept(h)) &&
	    !IS_CLOSING(h) ) {

#if PJ_IOQUEUE_HAS_SAFE_UNREG
	    increment_counter(h);
#endif
	    queue[processed].key = h;
	    queue[processed].event_type = READABLE_EVENT;
	    ++processed;
	}

	/*
	 * Check for writeability.
	 */
	if ((events[i].events & EPOLLOUT) && key_has_pending_write(h) &&
	    !IS_CLOSING(h)) {

#if PJ_IOQUEUE_HAS_SAFE_UNREG
	    increment_counter(h);
#endif
	    queue[processed].key = h;
	    queue[processed].event_type = WRITEABLE_EVENT;
	    ++processed;
	}

#if PJ_HAS_TCP
	/*
	 * Check for completion of connect() operation.
	 * (also reported via EPOLLOUT)
	 */
	if ((events[i].events & EPOLLOUT) && (h->connecting) &&
	    !IS_CLOSING(h)) {

#if PJ_IOQUEUE_HAS_SAFE_UNREG
	    increment_counter(h);
#endif
	    queue[processed].key = h;
	    queue[processed].event_type = WRITEABLE_EVENT;
	    ++processed;
	}
#endif /* PJ_HAS_TCP */

	/*
	 * Check for error condition.
	 */
	if (events[i].events & EPOLLERR && (h->connecting) &&
	    !IS_CLOSING(h)) {

#if PJ_IOQUEUE_HAS_SAFE_UNREG
	    increment_counter(h);
#endif
	    queue[processed].key = h;
	    queue[processed].event_type = EXCEPTION_EVENT;
	    ++processed;
	}
    }
    pj_lock_release(ioqueue->lock);

    /* Now process the events (with the ioqueue lock released). */
    for (i=0; i<processed; ++i) {
	switch (queue[i].event_type) {
	case READABLE_EVENT:
	    ioqueue_dispatch_read_event(ioqueue, queue[i].key);
	    break;
	case WRITEABLE_EVENT:
	    ioqueue_dispatch_write_event(ioqueue, queue[i].key);
	    break;
	case EXCEPTION_EVENT:
	    ioqueue_dispatch_exception_event(ioqueue, queue[i].key);
	    break;
	case NO_EVENT:
	    pj_assert(!"Invalid event!");
	    break;
	}

#if PJ_IOQUEUE_HAS_SAFE_UNREG
	decrement_counter(queue[i].key);
#endif
    }

    /* Special case:
     * When epoll returns > 0 but no descriptors are actually set!
     * Sleep to avoid spinning on a hot loop.
     */
    if (count > 0 && !processed && msec > 0) {
	pj_thread_sleep(msec);
    }

    pj_get_timestamp(&t1);
    TRACE_((THIS_FILE, "ioqueue_poll() returns %d, time=%d usec",
		       processed, pj_elapsed_usec(&t2, &t1)));

    return processed;
}
/*
 * ioqueue_dispatch_event()
 *
 * Report occurence of an event in the key to be processed by the
 * framework.
 *
 * Handles two cases under the key's mutex:
 *  - completion of a pending connect() (TCP builds only), where the
 *    connect result is determined in a platform-specific way;
 *  - completion of a pending send/sendto from the key's write list.
 * The mutex is released before invoking the user callback unless
 * per-key concurrency is disabled, in which case it is held across the
 * callback and released afterwards.
 */
void ioqueue_dispatch_write_event(pj_ioqueue_t *ioqueue, pj_ioqueue_key_t *h)
{
    /* Lock the key. */
    pj_mutex_lock(h->mutex);

    if (IS_CLOSING(h)) {
	pj_mutex_unlock(h->mutex);
	return;
    }

#if defined(PJ_HAS_TCP) && PJ_HAS_TCP!=0
    if (h->connecting) {
	/* Completion of connect() operation */
	pj_status_t status;
	pj_bool_t has_lock;

	/* Clear operation. */
	h->connecting = 0;

	ioqueue_remove_from_set(ioqueue, h, WRITEABLE_EVENT);
	ioqueue_remove_from_set(ioqueue, h, EXCEPTION_EVENT);


#if (defined(PJ_HAS_SO_ERROR) && PJ_HAS_SO_ERROR!=0)
	/* from connect(2):
	 * On Linux, use getsockopt to read the SO_ERROR option at
	 * level SOL_SOCKET to determine whether connect() completed
	 * successfully (if SO_ERROR is zero).
	 */
	{
	  int value;
	  int vallen = sizeof(value);
	  int gs_rc = pj_sock_getsockopt(h->fd, SOL_SOCKET, SO_ERROR,
					 &value, &vallen);
	  if (gs_rc != 0) {
	    /* Argh!! What to do now???
	     * Just indicate that the socket is connected. The
	     * application will get error as soon as it tries to use
	     * the socket to send/receive.
	     */
	    status = PJ_SUCCESS;
	  } else {
	    status = PJ_STATUS_FROM_OS(value);
	  }
 	}
#elif defined(PJ_WIN32) && PJ_WIN32!=0
	status = PJ_SUCCESS; /* success */
#else
	/* Excellent information in D.J. Bernstein page:
	 * http://cr.yp.to/docs/connect.html
	 *
	 * Seems like the most portable way of detecting connect()
	 * failure is to call getpeername(). If socket is connected,
	 * getpeername() will return 0. If the socket is not connected,
	 * it will return ENOTCONN, and read(fd, &ch, 1) will produce
	 * the right errno through error slippage. This is a combination
	 * of suggestions from Douglas C. Schmidt and Ken Keys.
	 */
	{
	    struct sockaddr_in addr;
	    int addrlen = sizeof(addr);

	    status = pj_sock_getpeername(h->fd, (struct sockaddr*)&addr,
				         &addrlen);
	}
#endif

        /* Unlock; from this point we don't need to hold key's mutex
         * (unless concurrency is disabled, which in this case we should
         * hold the mutex while calling the callback) */
	if (h->allow_concurrent) {
	    /* concurrency may be changed while we're in the callback, so
	     * save it to a flag.
	     */
	    has_lock = PJ_FALSE;
	    pj_mutex_unlock(h->mutex);
	} else {
	    has_lock = PJ_TRUE;
	}

	/* Call callback. */
        if (h->cb.on_connect_complete && !IS_CLOSING(h))
	    (*h->cb.on_connect_complete)(h, status);

	/* Unlock if we still hold the lock */
	if (has_lock) {
	    pj_mutex_unlock(h->mutex);
	}

	/* Done. */

    } else
#endif /* PJ_HAS_TCP */
    if (key_has_pending_write(h)) {
	/* Socket is writable. */
        struct write_operation *write_op;
        pj_ssize_t sent;
        pj_status_t send_rc = PJ_SUCCESS;

        /* Get the first in the queue. */
        write_op = h->write_list.next;

        /* For datagrams, we can remove the write_op from the list
         * so that send() can work in parallel.
         */
        if (h->fd_type == pj_SOCK_DGRAM()) {
            pj_list_erase(write_op);

            if (pj_list_empty(&h->write_list))
                ioqueue_remove_from_set(ioqueue, h, WRITEABLE_EVENT);

        }

        /* Send the data.
         * Unfortunately we must do this while holding key's mutex, thus
         * preventing parallel write on a single key.. :-((
         */
        sent = write_op->size - write_op->written;
        if (write_op->op == PJ_IOQUEUE_OP_SEND) {
            send_rc = pj_sock_send(h->fd, write_op->buf+write_op->written,
                                   &sent, write_op->flags);
	    /* Can't do this. We only clear "op" after we're finished sending
	     * the whole buffer.
	     */
	    //write_op->op = 0;

        } else if (write_op->op == PJ_IOQUEUE_OP_SEND_TO) {
	    int retry = 2;
	    while (--retry >= 0) {
		send_rc = pj_sock_sendto(h->fd,
					 write_op->buf+write_op->written,
					 &sent, write_op->flags,
					 &write_op->rmt_addr,
					 write_op->rmt_addrlen);
#if defined(PJ_IPHONE_OS_HAS_MULTITASKING_SUPPORT) && \
	    PJ_IPHONE_OS_HAS_MULTITASKING_SUPPORT!=0
		/* Special treatment for dead UDP sockets here, see ticket #1107 */
		if (send_rc==PJ_STATUS_FROM_OS(EPIPE) && !IS_CLOSING(h) &&
		    h->fd_type==pj_SOCK_DGRAM())
		{
		    PJ_PERROR(4,(THIS_FILE, send_rc,
				 "Send error for socket %d, retrying",
				 h->fd));
		    replace_udp_sock(h);
		    continue;
		}
#endif
		break;
	    }

	    /* Can't do this. We only clear "op" after we're finished sending
	     * the whole buffer.
	     */
	    //write_op->op = 0;

        } else {
            pj_assert(!"Invalid operation type!");
	    write_op->op = PJ_IOQUEUE_OP_NONE;
            send_rc = PJ_EBUG;
        }

        if (send_rc == PJ_SUCCESS) {
            write_op->written += sent;
        } else {
	    /* Encode the error as a negative 'written' count. */
            pj_assert(send_rc > 0);
            write_op->written = -send_rc;
        }

        /* Are we finished with this buffer?
	 * Datagrams complete in one shot regardless of bytes written.
	 */
        if (send_rc!=PJ_SUCCESS ||
            write_op->written == (pj_ssize_t)write_op->size ||
            h->fd_type == pj_SOCK_DGRAM())
        {
	    pj_bool_t has_lock;

	    write_op->op = PJ_IOQUEUE_OP_NONE;

            if (h->fd_type != pj_SOCK_DGRAM()) {
                /* Write completion of the whole stream. */
                pj_list_erase(write_op);

                /* Clear operation if there's no more data to send. */
                if (pj_list_empty(&h->write_list))
                    ioqueue_remove_from_set(ioqueue, h, WRITEABLE_EVENT);

            }

	    /* Unlock; from this point we don't need to hold key's mutex
	     * (unless concurrency is disabled, which in this case we should
	     * hold the mutex while calling the callback) */
	    if (h->allow_concurrent) {
		/* concurrency may be changed while we're in the callback, so
		 * save it to a flag.
		 */
		has_lock = PJ_FALSE;
		pj_mutex_unlock(h->mutex);
	    } else {
		has_lock = PJ_TRUE;
	    }

	    /* Call callback. */
            if (h->cb.on_write_complete && !IS_CLOSING(h)) {
	        (*h->cb.on_write_complete)(h,
                                           (pj_ioqueue_op_key_t*)write_op,
                                           write_op->written);
            }

	    if (has_lock) {
		pj_mutex_unlock(h->mutex);
	    }

        } else {
            pj_mutex_unlock(h->mutex);
        }

        /* Done. */
    } else {
        /*
         * This is normal; execution may fall here when multiple threads
         * are signalled for the same event, but only one thread eventually
         * able to process the event.
         */
	pj_mutex_unlock(h->mutex);
    }
}
/* * pj_init(void). * Init PJLIB! */ PJ_DEF(pj_status_t) pj_init(void) { WSADATA wsa; char dummy_guid[32]; /* use maximum GUID length */ pj_str_t guid; pj_status_t rc; /* Check if PJLIB have been initialized */ if (initialized) { ++initialized; return PJ_SUCCESS; } /* Init Winsock.. */ if (WSAStartup(MAKEWORD(2,0), &wsa) != 0) { return PJ_RETURN_OS_ERROR(WSAGetLastError()); } /* Init this thread's TLS. */ if ((rc=pj_thread_init()) != PJ_SUCCESS) { return rc; } /* Init logging */ pj_log_init(); /* Init random seed. */ /* Or probably not. Let application in charge of this */ /* pj_srand( GetCurrentProcessId() ); */ /* Initialize critical section. */ if ((rc=init_mutex(&critical_section_mutex, "pj%p")) != PJ_SUCCESS) return rc; /* Startup GUID. */ guid.ptr = dummy_guid; pj_generate_unique_string( &guid ); /* Initialize exception ID for the pool. * Must do so after critical section is configured. */ rc = pj_exception_id_alloc("PJLIB/No memory", &PJ_NO_MEMORY_EXCEPTION); if (rc != PJ_SUCCESS) return rc; /* Startup timestamp */ #if defined(PJ_HAS_HIGH_RES_TIMER) && PJ_HAS_HIGH_RES_TIMER != 0 { pj_timestamp dummy_ts; if ((rc=pj_get_timestamp_freq(&dummy_ts)) != PJ_SUCCESS) { return rc; } if ((rc=pj_get_timestamp(&dummy_ts)) != PJ_SUCCESS) { return rc; } } #endif /* Flag PJLIB as initialized */ ++initialized; pj_assert(initialized == 1); PJ_LOG(4,(THIS_FILE, "pjlib %s for win32 initialized", PJ_VERSION)); return PJ_SUCCESS; }
/*
 * ioqueue_dispatch_read_event()
 *
 * Process a readability event on the key: either complete one pending
 * accept() (TCP builds) or one pending recv/recvfrom/read operation,
 * then invoke the user callback.  The key mutex is released before the
 * callback unless per-key concurrency is disabled, in which case it is
 * held across the callback and released afterwards.
 */
void ioqueue_dispatch_read_event( pj_ioqueue_t *ioqueue, pj_ioqueue_key_t *h )
{
    pj_status_t rc;

    /* Lock the key. */
    pj_mutex_lock(h->mutex);

    if (IS_CLOSING(h)) {
	pj_mutex_unlock(h->mutex);
	return;
    }

#   if PJ_HAS_TCP
    if (!pj_list_empty(&h->accept_list)) {

        struct accept_operation *accept_op;
	pj_bool_t has_lock;

        /* Get one accept operation from the list. */
	accept_op = h->accept_list.next;
        pj_list_erase(accept_op);
        accept_op->op = PJ_IOQUEUE_OP_NONE;

	/* Clear bit in fdset if there is no more pending accept */
        if (pj_list_empty(&h->accept_list))
            ioqueue_remove_from_set(ioqueue, h, READABLE_EVENT);

	rc=pj_sock_accept(h->fd, accept_op->accept_fd,
                          accept_op->rmt_addr, accept_op->addrlen);
	if (rc==PJ_SUCCESS && accept_op->local_addr) {
	    rc = pj_sock_getsockname(*accept_op->accept_fd,
                                     accept_op->local_addr,
				     accept_op->addrlen);
	}

	/* Unlock; from this point we don't need to hold key's mutex
	 * (unless concurrency is disabled, which in this case we should
	 * hold the mutex while calling the callback) */
	if (h->allow_concurrent) {
	    /* concurrency may be changed while we're in the callback, so
	     * save it to a flag.
	     */
	    has_lock = PJ_FALSE;
	    pj_mutex_unlock(h->mutex);
	} else {
	    has_lock = PJ_TRUE;
	}

	/* Call callback. */
        if (h->cb.on_accept_complete && !IS_CLOSING(h)) {
	    (*h->cb.on_accept_complete)(h,
                                        (pj_ioqueue_op_key_t*)accept_op,
                                        *accept_op->accept_fd, rc);
	}

	if (has_lock) {
	    pj_mutex_unlock(h->mutex);
	}
    }
    else
#   endif
    if (key_has_pending_read(h)) {
        struct read_operation *read_op;
        pj_ssize_t bytes_read;
	pj_bool_t has_lock;

        /* Get one pending read operation from the list. */
        read_op = h->read_list.next;
        pj_list_erase(read_op);

        /* Clear fdset if there is no pending read. */
        if (pj_list_empty(&h->read_list))
            ioqueue_remove_from_set(ioqueue, h, READABLE_EVENT);

        bytes_read = read_op->size;

	if ((read_op->op == PJ_IOQUEUE_OP_RECV_FROM)) {
	    read_op->op = PJ_IOQUEUE_OP_NONE;
	    rc = pj_sock_recvfrom(h->fd, read_op->buf, &bytes_read,
				  read_op->flags,
				  read_op->rmt_addr,
                                  read_op->rmt_addrlen);
	} else if ((read_op->op == PJ_IOQUEUE_OP_RECV)) {
	    read_op->op = PJ_IOQUEUE_OP_NONE;
	    rc = pj_sock_recv(h->fd, read_op->buf, &bytes_read,
			      read_op->flags);
        } else {
            pj_assert(read_op->op == PJ_IOQUEUE_OP_READ);
	    read_op->op = PJ_IOQUEUE_OP_NONE;
            /*
             * User has specified pj_ioqueue_read().
             * On Win32, we should do ReadFile(). But because we got
             * here because of select() anyway, user must have put a
             * socket descriptor on h->fd, which in this case we can
             * just call pj_sock_recv() instead of ReadFile().
             * On Unix, user may put a file in h->fd, so we'll have
             * to call read() here.
             * This may not compile on systems which doesn't have
             * read(). That's why we only specify PJ_LINUX here so
             * that error is easier to catch.
             */
#	    if defined(PJ_WIN32) && PJ_WIN32 != 0 || \
	       defined(PJ_WIN32_WINCE) && PJ_WIN32_WINCE != 0
                rc = pj_sock_recv(h->fd, read_op->buf, &bytes_read,
				  read_op->flags);
                //rc = ReadFile((HANDLE)h->fd, read_op->buf, read_op->size,
                //              &bytes_read, NULL);
#           elif (defined(PJ_HAS_UNISTD_H) && PJ_HAS_UNISTD_H != 0)
                bytes_read = read(h->fd, read_op->buf, bytes_read);
                rc = (bytes_read >= 0) ? PJ_SUCCESS : pj_get_os_error();
#	    elif defined(PJ_LINUX_KERNEL) && PJ_LINUX_KERNEL != 0
                bytes_read = sys_read(h->fd, read_op->buf, bytes_read);
                rc = (bytes_read >= 0) ? PJ_SUCCESS : -bytes_read;
#           else
#               error "Implement read() for this platform!"
#           endif
        }

	if (rc != PJ_SUCCESS) {
#	    if defined(PJ_WIN32) && PJ_WIN32 != 0
	    /* On Win32, for UDP, WSAECONNRESET on the receive side
	     * indicates that previous sending has triggered ICMP Port
	     * Unreachable message.
	     * But we wouldn't know at this point which one of previous
	     * key that has triggered the error, since UDP socket can
	     * be shared!
	     * So we'll just ignore it!
	     */

	    if (rc == PJ_STATUS_FROM_OS(WSAECONNRESET)) {
		//PJ_LOG(4,(THIS_FILE,
                //          "Ignored ICMP port unreach. on key=%p", h));
	    }
#	    endif

            /* In any case we would report this to caller.
	     * The error is encoded as a negative byte count.
	     */
            bytes_read = -rc;

#if defined(PJ_IPHONE_OS_HAS_MULTITASKING_SUPPORT) && \
    PJ_IPHONE_OS_HAS_MULTITASKING_SUPPORT!=0
	    /* Special treatment for dead UDP sockets here, see ticket #1107 */
	    if (rc == PJ_STATUS_FROM_OS(ENOTCONN) && !IS_CLOSING(h) &&
		h->fd_type==pj_SOCK_DGRAM())
	    {
		replace_udp_sock(h);
	    }
#endif
        }

	/* Unlock; from this point we don't need to hold key's mutex
	 * (unless concurrency is disabled, which in this case we should
	 * hold the mutex while calling the callback) */
	if (h->allow_concurrent) {
	    /* concurrency may be changed while we're in the callback, so
	     * save it to a flag.
	     */
	    has_lock = PJ_FALSE;
	    pj_mutex_unlock(h->mutex);
	} else {
	    has_lock = PJ_TRUE;
	}

	/* Call callback. */
        if (h->cb.on_read_complete && !IS_CLOSING(h)) {
	    (*h->cb.on_read_complete)(h,
                                      (pj_ioqueue_op_key_t*)read_op,
                                      bytes_read);
        }

	if (has_lock) {
	    pj_mutex_unlock(h->mutex);
	}

    } else {
        /*
         * This is normal; execution may fall here when multiple threads
         * are signalled for the same event, but only one thread eventually
         * able to process the event.
         */
	pj_mutex_unlock(h->mutex);
    }
}
/* * pj_ioqueue_unregister() * * Unregister handle from ioqueue. */ PJ_DEF(pj_status_t) pj_ioqueue_unregister( pj_ioqueue_key_t *key) { pj_ioqueue_t *ioqueue; struct epoll_event ev; int status; PJ_ASSERT_RETURN(key != NULL, PJ_EINVAL); ioqueue = key->ioqueue; /* Lock the key to make sure no callback is simultaneously modifying * the key. We need to lock the key before ioqueue here to prevent * deadlock. */ pj_ioqueue_lock_key(key); /* Also lock ioqueue */ pj_lock_acquire(ioqueue->lock); pj_assert(ioqueue->count > 0); --ioqueue->count; #if !PJ_IOQUEUE_HAS_SAFE_UNREG pj_list_erase(key); #endif ev.events = 0; ev.epoll_data = (epoll_data_type)key; status = os_epoll_ctl( ioqueue->epfd, EPOLL_CTL_DEL, key->fd, &ev); if (status != 0) { pj_status_t rc = pj_get_os_error(); pj_lock_release(ioqueue->lock); return rc; } /* Destroy the key. */ pj_sock_close(key->fd); pj_lock_release(ioqueue->lock); #if PJ_IOQUEUE_HAS_SAFE_UNREG /* Mark key is closing. */ key->closing = 1; /* Decrement counter. */ decrement_counter(key); /* Done. */ if (key->grp_lock) { /* just dec_ref and unlock. we will set grp_lock to NULL * elsewhere */ pj_grp_lock_t *grp_lock = key->grp_lock; // Don't set grp_lock to NULL otherwise the other thread // will crash. Just leave it as dangling pointer, but this // should be safe //key->grp_lock = NULL; pj_grp_lock_dec_ref_dbg(grp_lock, "ioqueue", 0); pj_grp_lock_release(grp_lock); } else { pj_ioqueue_unlock_key(key); } #else if (key->grp_lock) { /* set grp_lock to NULL and unlock */ pj_grp_lock_t *grp_lock = key->grp_lock; // Don't set grp_lock to NULL otherwise the other thread // will crash. Just leave it as dangling pointer, but this // should be safe //key->grp_lock = NULL; pj_grp_lock_dec_ref_dbg(grp_lock, "ioqueue", 0); pj_grp_lock_release(grp_lock); } else { pj_ioqueue_unlock_key(key); } pj_lock_destroy(key->lock); #endif return PJ_SUCCESS; }
/* Timer callback. When the timer is fired, it can be time to refresh
 * the session if UA is the refresher, otherwise it is time to end
 * the session.
 *
 * As refresher: send a refreshing re-INVITE or UPDATE (postponing by
 * one second if another INVITE/SDP negotiation is in progress).
 * As refreshee: terminate the session with 408 Request Timeout.
 * The dialog is locked for the decision/build phase and unlocked
 * before the message is actually sent.
 */
static void timer_cb(pj_timer_heap_t *timer_heap, struct pj_timer_entry *entry)
{
    pjsip_inv_session *inv = (pjsip_inv_session*) entry->user_data;
    pjsip_tx_data *tdata = NULL;
    pj_status_t status;
    pj_bool_t as_refresher;

    pj_assert(inv);

    /* Mark the timer as no longer scheduled. */
    inv->timer->timer.id = 0;

    PJ_UNUSED_ARG(timer_heap);

    /* Lock dialog. */
    pjsip_dlg_inc_lock(inv->dlg);

    /* Check our role */
    as_refresher =
	(inv->timer->refresher == TR_UAC && inv->timer->role == PJSIP_ROLE_UAC) ||
	(inv->timer->refresher == TR_UAS && inv->timer->role == PJSIP_ROLE_UAS);

    /* Do action based on role, refresher or refreshee */
    if (as_refresher) {

	pj_time_val now;

	/* As refresher, reschedule the refresh request on the following:
	 *  - must not send re-INVITE if another INVITE or SDP negotiation
	 *    is in progress.
	 *  - must not send UPDATE with SDP if SDP negotiation is in progress
	 */
	pjmedia_sdp_neg_state neg_state = pjmedia_sdp_neg_get_state(inv->neg);
	if ( (!inv->timer->use_update &&
	      (
		  inv->invite_tsx != NULL ||
		  neg_state != PJMEDIA_SDP_NEG_STATE_DONE)
	      )
	     ||
	     (inv->timer->use_update && inv->timer->with_sdp &&
	      neg_state != PJMEDIA_SDP_NEG_STATE_DONE
	     )
	   )
	{
	    /* Retry in one second. */
	    pj_time_val delay = {1, 0};

	    inv->timer->timer.id = 1;
	    pjsip_endpt_schedule_timer(inv->dlg->endpt, &inv->timer->timer,
				       &delay);
	    pjsip_dlg_dec_lock(inv->dlg);
	    return;
	}

	/* Refresher, refresh the session */
	if (inv->timer->use_update) {
	    const pjmedia_sdp_session *offer = NULL;

	    if (inv->timer->with_sdp) {
		pjmedia_sdp_neg_get_active_local(inv->neg, &offer);
	    }
	    status = pjsip_inv_update(inv, NULL, offer, &tdata);
	} else {
	    /* Create re-INVITE without modifying session: re-send the
	     * currently active local SDP as the offer.
	     */
	    pjsip_msg_body *body;
	    const pjmedia_sdp_session *offer = NULL;

	    pj_assert(pjmedia_sdp_neg_get_state(inv->neg) ==
		      PJMEDIA_SDP_NEG_STATE_DONE);

	    status = pjsip_inv_invite(inv, &tdata);
	    if (status == PJ_SUCCESS)
		status = pjmedia_sdp_neg_send_local_offer(inv->pool_prov,
							  inv->neg, &offer);
	    if (status == PJ_SUCCESS)
		status = pjmedia_sdp_neg_get_neg_local(inv->neg, &offer);
	    if (status == PJ_SUCCESS) {
		status = pjsip_create_sdp_body(tdata->pool,
					       (pjmedia_sdp_session*)offer,
					       &body);
		tdata->msg->body = body;
	    }
	}

	pj_gettimeofday(&now);
	PJ_LOG(4, (inv->pool->obj_name,
		   "Refreshing session after %ds (expiration period=%ds)",
		   (now.sec-inv->timer->last_refresh.sec),
		   inv->timer->setting.sess_expires));
    } else {

	pj_time_val now;

	/* Refreshee, terminate the session */
	status = pjsip_inv_end_session(inv, PJSIP_SC_REQUEST_TIMEOUT,
				       NULL, &tdata);

	pj_gettimeofday(&now);
	PJ_LOG(3, (inv->pool->obj_name,
		   "No session refresh received after %ds "
		   "(expiration period=%ds), stopping session now!",
		   (now.sec-inv->timer->last_refresh.sec),
		   inv->timer->setting.sess_expires));
    }

    /* Unlock dialog. */
    pjsip_dlg_dec_lock(inv->dlg);

    /* Send message, if any */
    if (tdata && status == PJ_SUCCESS) {
	status = pjsip_inv_send_msg(inv, tdata);
    }

    /* Print error message, if any */
    if (status != PJ_SUCCESS) {
	PJ_PERROR(2, (inv->pool->obj_name, status,
		      "Error in %s session timer",
		      (as_refresher? "refreshing" : "terminating")));
    }
}
/*
 * Update EC state.
 *
 * Called once per segment with the microphone frame (rec_frm) and the
 * speaker frame (play_frm). Maintains level histories for both signals,
 * computes the correlation between the play pattern and the mic pattern
 * at every candidate tail position, and tracks the tail position with
 * the best (lowest) accumulated correlation as the estimated echo tail.
 */
static void echo_supp_update(echo_supp *ec, pj_int16_t *rec_frm,
                             const pj_int16_t *play_frm)
{
    int prev_index;
    unsigned i, j, frm_level, sum_play_level, ulaw;
    pj_uint16_t old_rec_frm_level, old_play_frm_level;
    float play_corr;

    ++ec->update_cnt;
    if (ec->update_cnt > 0x7FFFFFFF)
        ec->update_cnt = 0x7FFFFFFF; /* Detect overflow */

    /* Calculate current play frame level */
    frm_level = pjmedia_calc_avg_signal(play_frm, ec->samples_per_segment);
    ++frm_level; /* to avoid division by zero */

    /* Save the oldest frame level for later (used by the incremental
     * update of the running sums below).
     */
    old_play_frm_level = ec->play_hist[0];

    /* Push current frame level to the back of the play history */
    pj_array_erase(ec->play_hist, sizeof(pj_uint16_t), ec->play_hist_cnt, 0);
    ec->play_hist[ec->play_hist_cnt-1] = (pj_uint16_t) frm_level;

    /* Calculate level of current mic frame */
    frm_level = pjmedia_calc_avg_signal(rec_frm, ec->samples_per_segment);
    ++frm_level; /* to avoid division by zero */

    /* Save the oldest frame level for later */
    old_rec_frm_level = ec->rec_hist[0];

    /* Push to the back of the rec history */
    pj_array_erase(ec->rec_hist, sizeof(pj_uint16_t), ec->templ_cnt, 0);
    ec->rec_hist[ec->templ_cnt-1] = (pj_uint16_t) frm_level;

    /* Can't do the calc until the play history is full. */
    if (ec->update_cnt < ec->play_hist_cnt)
        return;

    /* Skip if learning is done */
    if (!ec->learning)
        return;

    /* Calculate rec signal pattern: sum of levels and sum of the
     * segment-to-segment level ratios ("correlation").
     */
    if (ec->sum_rec_level == 0) {
        /* Buffer has just been filled up, do full calculation */
        ec->rec_corr = 0;
        ec->sum_rec_level = 0;
        for (i=0; i < ec->templ_cnt-1; ++i) {
            float corr;
            corr = (float)ec->rec_hist[i+1] / ec->rec_hist[i];
            ec->rec_corr += corr;
            ec->sum_rec_level += ec->rec_hist[i];
        }
        ec->sum_rec_level += ec->rec_hist[i];
    } else {
        /* Update from previous calculation: drop the contribution of the
         * expired oldest sample and add the newest one.
         */
        ec->sum_rec_level = ec->sum_rec_level - old_rec_frm_level +
                            ec->rec_hist[ec->templ_cnt-1];
        ec->rec_corr = ec->rec_corr -
                       ((float)ec->rec_hist[0] / old_rec_frm_level) +
                       ((float)ec->rec_hist[ec->templ_cnt-1] /
                               ec->rec_hist[ec->templ_cnt-2]);
    }

    /* Iterate through the play history and calculate the signal correlation
     * for every tail position in the play_hist. Save the result in temporary
     * array since we may bail out early if the conversation state is not good
     * to detect echo.
     */

    /*
     * First phase: do full calculation for the first position
     */
    if (ec->sum_play_level0 == 0) {
        /* Buffer has just been filled up, do full calculation */
        sum_play_level = 0;
        play_corr = 0;
        for (j=0; j<ec->templ_cnt-1; ++j) {
            float corr;
            corr = (float)ec->play_hist[j+1] / ec->play_hist[j];
            play_corr += corr;
            sum_play_level += ec->play_hist[j];
        }
        sum_play_level += ec->play_hist[j];
        ec->sum_play_level0 = sum_play_level;
        ec->play_corr0 = play_corr;
    } else {
        /* Update from previous calculation */
        ec->sum_play_level0 = ec->sum_play_level0 - old_play_frm_level +
                              ec->play_hist[ec->templ_cnt-1];
        ec->play_corr0 = ec->play_corr0 -
                         ((float)ec->play_hist[0] / old_play_frm_level) +
                         ((float)ec->play_hist[ec->templ_cnt-1] /
                                 ec->play_hist[ec->templ_cnt-2]);
        sum_play_level = ec->sum_play_level0;
        play_corr = ec->play_corr0;
    }

    /* Correlation and mic/speaker gain factor for tail position 0. */
    ec->tmp_corr[0] = FABS(play_corr - ec->rec_corr);
    ec->tmp_factor[0] = (float)ec->sum_rec_level / sum_play_level;

    /* Bail out if remote isn't talking */
    ulaw = pjmedia_linear2ulaw(sum_play_level/ec->templ_cnt) ^ 0xFF;
    if (ulaw < MIN_SIGNAL_ULAW) {
        echo_supp_set_state(ec, ST_REM_SILENT, ulaw);
        return;
    }

    /* Bail out if local user is talking */
    if (ec->sum_rec_level >= sum_play_level) {
        echo_supp_set_state(ec, ST_LOCAL_TALK, ulaw);
        return;
    }

    /*
     * Second phase: do incremental calculation for the rest of positions
     * (slide the window one segment at a time).
     */
    for (i=1; i < ec->tail_cnt; ++i) {
        unsigned end;

        end = i + ec->templ_cnt;
        sum_play_level = sum_play_level - ec->play_hist[i-1] +
                         ec->play_hist[end-1];
        play_corr = play_corr -
                    ((float)ec->play_hist[i]/ec->play_hist[i-1]) +
                    ((float)ec->play_hist[end-1]/ec->play_hist[end-2]);

        /* Bail out if remote isn't talking */
        ulaw = pjmedia_linear2ulaw(sum_play_level/ec->templ_cnt) ^ 0xFF;
        if (ulaw < MIN_SIGNAL_ULAW) {
            echo_supp_set_state(ec, ST_REM_SILENT, ulaw);
            return;
        }

        /* Bail out if local user is talking */
        if (ec->sum_rec_level >= sum_play_level) {
            echo_supp_set_state(ec, ST_LOCAL_TALK, ulaw);
            return;
        }

#if 0 // disabled: not a good idea if mic throws out loud echo
        /* Also bail out if we suspect there's a doubletalk */
        ulaw = pjmedia_linear2ulaw(ec->sum_rec_level/ec->templ_cnt) ^ 0xFF;
        if (ulaw > MIN_SIGNAL_ULAW) {
            echo_supp_set_state(ec, ST_DOUBLETALK, ulaw);
            return;
        }
#endif

        /* Calculate correlation and save to temporary array */
        ec->tmp_corr[i] = FABS(play_corr - ec->rec_corr);

        /* Also calculate the gain factor between mic and speaker level */
        ec->tmp_factor[i] = (float)ec->sum_rec_level / sum_play_level;
        pj_assert(ec->tmp_factor[i] < 1);
    }

    /* We seem to have good signal, we can update the EC state */
    echo_supp_set_state(ec, ST_REM_TALK, MIN_SIGNAL_ULAW);

    /* Accummulate the correlation value to the history and at the same
     * time find the tail index of the best correlation.
     */
    prev_index = ec->tail_index;
    for (i=1; i<ec->tail_cnt-1; ++i) {
        float *p = &ec->corr_sum[i], sum;

        /* Accummulate correlation value for this tail position */
        ec->corr_sum[i] += ec->tmp_corr[i];

        /* Update the min and avg gain factor for this tail position */
        if (ec->tmp_factor[i] < ec->min_factor[i])
            ec->min_factor[i] = ec->tmp_factor[i];
        ec->avg_factor[i] = ((ec->avg_factor[i] * ec->tail_cnt) +
                                ec->tmp_factor[i]) /
                            (ec->tail_cnt + 1);

        /* To get the best correlation, also include the correlation
         * value of the neighbouring tail locations.
         */
        sum = *(p-1) + (*p)*2 + *(p+1);
        //sum = *p;

        /* See if we have better correlation value */
        if (sum < ec->best_corr) {
            ec->tail_index = i;
            ec->best_corr = sum;
        }
    }

    /* Log whenever the best-tail estimate moves. */
    if (ec->tail_index != prev_index) {
        unsigned duration;
        int imin, iavg;

        duration = ec->update_cnt * SEGMENT_PTIME;
        imin = (int)(ec->min_factor[ec->tail_index] * 1000);
        iavg = (int)(ec->avg_factor[ec->tail_index] * 1000);

        PJ_LOG(4,(THIS_FILE,
                  "Echo suppressor updated at t=%03d.%03ds, echo tail=%d msec"
                  ", factor min/avg=%d.%03d/%d.%03d",
                  (duration/1000), (duration%1000),
                  (ec->tail_cnt-ec->tail_index) * SEGMENT_PTIME,
                  imin/1000, imin%1000,
                  iavg/1000, iavg%1000));
    }

    /* Stop learning once enough calculations have been performed. */
    ++ec->calc_cnt;
    if (ec->calc_cnt > ec->max_calc) {
        unsigned duration;
        int imin, iavg;

        ec->learning = PJ_FALSE;
        ec->running_cnt = 0;

        duration = ec->update_cnt * SEGMENT_PTIME;
        imin = (int)(ec->min_factor[ec->tail_index] * 1000);
        iavg = (int)(ec->avg_factor[ec->tail_index] * 1000);

        PJ_LOG(4,(THIS_FILE,
                  "Echo suppressor learning done at t=%03d.%03ds, tail=%d ms"
                  ", factor min/avg=%d.%03d/%d.%03d",
                  (duration/1000), (duration%1000),
                  (ec->tail_cnt-ec->tail_index) * SEGMENT_PTIME,
                  imin/1000, imin%1000,
                  iavg/1000, iavg%1000));
    }
}
/*
 * Create a video port from the given parameters.
 *
 * Fix: the call to pjmedia_clock_create2() contained a mojibake token
 * "¶m" (an HTML-entity corruption of "&param") which does not compile;
 * restored to &param.
 *
 * @param pool       Pool for allocating the port (a child pool is also
 *                   created from its factory).
 * @param prm        Creation parameters; direction must be a single
 *                   direction and format type must be video.
 * @param p_vid_port On success, receives the created video port.
 *
 * @return PJ_SUCCESS on success, or the appropriate error code.
 */
PJ_DEF(pj_status_t) pjmedia_vid_port_create( pj_pool_t *pool,
                                             const pjmedia_vid_port_param *prm,
                                             pjmedia_vid_port **p_vid_port)
{
    pjmedia_vid_port *vp;
    const pjmedia_video_format_detail *vfd;
    char dev_name[64];
    char fmt_name[5];
    pjmedia_vid_dev_cb vid_cb;
    pj_bool_t need_frame_buf = PJ_FALSE;
    pj_status_t status;
    unsigned ptime_usec;
    pjmedia_vid_dev_param vparam;
    pjmedia_vid_dev_info di;
    unsigned i;

    PJ_ASSERT_RETURN(pool && prm && p_vid_port, PJ_EINVAL);
    PJ_ASSERT_RETURN(prm->vidparam.fmt.type == PJMEDIA_TYPE_VIDEO &&
                     prm->vidparam.dir != PJMEDIA_DIR_NONE &&
                     prm->vidparam.dir != PJMEDIA_DIR_CAPTURE_RENDER,
                     PJ_EINVAL);

    /* Retrieve the video format detail */
    vfd = pjmedia_format_get_video_format_detail(&prm->vidparam.fmt, PJ_TRUE);
    if (!vfd)
        return PJ_EINVAL;

    PJ_ASSERT_RETURN(vfd->fps.num, PJ_EINVAL);

    /* Allocate videoport */
    vp = PJ_POOL_ZALLOC_T(pool, pjmedia_vid_port);
    vp->pool = pj_pool_create(pool->factory, "video port", 500, 500, NULL);
    vp->role = prm->active ? ROLE_ACTIVE : ROLE_PASSIVE;
    vp->dir = prm->vidparam.dir;
//    vp->cap_size = vfd->size;

    vparam = prm->vidparam;
    dev_name[0] = '\0';

    /* Get device info.
     * NOTE(review): early returns below leave vp->pool allocated;
     * presumably it is reclaimed when the parent pool is destroyed —
     * confirm against caller's pool ownership.
     */
    if (vp->dir & PJMEDIA_DIR_CAPTURE)
        status = pjmedia_vid_dev_get_info(prm->vidparam.cap_id, &di);
    else
        status = pjmedia_vid_dev_get_info(prm->vidparam.rend_id, &di);
    if (status != PJ_SUCCESS)
        return status;

    pj_ansi_snprintf(dev_name, sizeof(dev_name), "%s [%s]",
                     di.name, di.driver);

    for (i = 0; i < di.fmt_cnt; ++i) {
        if (prm->vidparam.fmt.id == di.fmt[i].id)
            break;
    }

    if (i == di.fmt_cnt) {
        /* The device has no matching format. Pick one from
         * the supported formats, and later use converter to
         * convert it to the required format.
         */
        pj_assert(di.fmt_cnt != 0);
        vparam.fmt.id = di.fmt[0].id;
    }

    pj_strdup2_with_null(pool, &vp->dev_name, di.name);
    vp->stream_role = di.has_callback ? ROLE_ACTIVE : ROLE_PASSIVE;

    pjmedia_fourcc_name(vparam.fmt.id, fmt_name);

    PJ_LOG(4,(THIS_FILE,
              "Opening device %s for %s: format=%s, size=%dx%d @%d:%d fps",
              dev_name,
              vid_dir_name(prm->vidparam.dir), fmt_name,
              vfd->size.w, vfd->size.h,
              vfd->fps.num, vfd->fps.denum));

    ptime_usec = PJMEDIA_PTIME(&vfd->fps);
    pjmedia_clock_src_init(&vp->clocksrc, PJMEDIA_TYPE_VIDEO,
                           prm->vidparam.clock_rate, ptime_usec);
    vp->sync_clocksrc.max_sync_ticks =
        PJMEDIA_CLOCK_SYNC_MAX_RESYNC_DURATION *
        1000 / vp->clocksrc.ptime_usec;

    /* Create the video stream */
    pj_bzero(&vid_cb, sizeof(vid_cb));
    vid_cb.capture_cb = &vidstream_cap_cb;
    vid_cb.render_cb = &vidstream_render_cb;

    status = pjmedia_vid_dev_stream_create(&vparam, &vid_cb, vp,
                                           &vp->strm);
    if (status != PJ_SUCCESS)
        goto on_error;

    PJ_LOG(4,(THIS_FILE,
              "Device %s opened: format=%s, size=%dx%d @%d:%d fps",
              dev_name, fmt_name,
              vparam.fmt.det.vid.size.w, vparam.fmt.det.vid.size.h,
              vparam.fmt.det.vid.fps.num, vparam.fmt.det.vid.fps.denum));

    /* Subscribe to device's events */
    pjmedia_event_subscribe(NULL, &vidstream_event_cb, vp, vp->strm);

    /* Converter direction depends on whether we capture (device format ->
     * requested format) or render (requested format -> device format).
     */
    if (vp->dir & PJMEDIA_DIR_CAPTURE) {
        pjmedia_format_copy(&vp->conv.conv_param.src, &vparam.fmt);
        pjmedia_format_copy(&vp->conv.conv_param.dst, &prm->vidparam.fmt);
    } else {
        pjmedia_format_copy(&vp->conv.conv_param.src, &prm->vidparam.fmt);
        pjmedia_format_copy(&vp->conv.conv_param.dst, &vparam.fmt);
    }

    status = create_converter(vp);
    if (status != PJ_SUCCESS)
        goto on_error;

    if (vp->role==ROLE_ACTIVE &&
        ((vp->dir & PJMEDIA_DIR_ENCODING) || vp->stream_role==ROLE_PASSIVE))
    {
        pjmedia_clock_param param;

        /* Active role is wanted, but our device is passive, so create
         * master clocks to run the media flow. For encoding direction,
         * we also want to create our own clock since the device's clock
         * may run at a different rate.
         */
        need_frame_buf = PJ_TRUE;

        param.usec_interval = PJMEDIA_PTIME(&vfd->fps);
        param.clock_rate = prm->vidparam.clock_rate;
        status = pjmedia_clock_create2(pool, &param,
                                       PJMEDIA_CLOCK_NO_HIGHEST_PRIO,
                                       (vp->dir & PJMEDIA_DIR_ENCODING) ?
                                       &enc_clock_cb: &dec_clock_cb,
                                       vp, &vp->clock);
        if (status != PJ_SUCCESS)
            goto on_error;
    } else if (vp->role==ROLE_PASSIVE) {
        vid_pasv_port *pp;

        /* Always need to create media port for passive role */
        vp->pasv_port = pp = PJ_POOL_ZALLOC_T(pool, vid_pasv_port);
        pp->vp = vp;
        pp->base.get_frame = &vid_pasv_port_get_frame;
        pp->base.put_frame = &vid_pasv_port_put_frame;
        pjmedia_port_info_init2(&pp->base.info, &vp->dev_name,
                                PJMEDIA_SIG_VID_PORT,
                                prm->vidparam.dir, &prm->vidparam.fmt);

        if (vp->stream_role == ROLE_ACTIVE) {
            need_frame_buf = PJ_TRUE;
        }
    }

    if (need_frame_buf) {
        const pjmedia_video_format_info *vfi;
        pjmedia_video_apply_fmt_param vafp;

        vfi = pjmedia_get_video_format_info(NULL, vparam.fmt.id);
        if (!vfi) {
            status = PJ_ENOTFOUND;
            goto on_error;
        }

        pj_bzero(&vafp, sizeof(vafp));
        vafp.size = vparam.fmt.det.vid.size;
        status = vfi->apply_fmt(vfi, &vafp);
        if (status != PJ_SUCCESS)
            goto on_error;

        /* Allocate the frame buffer sized for one full frame of the
         * device format, guarded by a mutex since it is shared between
         * the clock thread and the device callback.
         */
        vp->frm_buf = PJ_POOL_ZALLOC_T(pool, pjmedia_frame);
        vp->frm_buf_size = vafp.framebytes;
        vp->frm_buf->buf = pj_pool_alloc(pool, vafp.framebytes);
        vp->frm_buf->size = vp->frm_buf_size;
        vp->frm_buf->type = PJMEDIA_FRAME_TYPE_NONE;

        status = pj_mutex_create_simple(pool, vp->dev_name.ptr,
                                        &vp->frm_mutex);
        if (status != PJ_SUCCESS)
            goto on_error;
    }

    *p_vid_port = vp;

    return PJ_SUCCESS;

on_error:
    pjmedia_vid_port_destroy(vp);
    return status;
}
/**
 * Private: process pager message.
 * This may trigger pjsua_ui_on_pager() or pjsua_ui_on_typing().
 *
 * Dispatches an incoming MESSAGE request either as a typing indication
 * (application/im-iscomposing+xml body) or as a plain pager message, and
 * invokes whichever application callbacks are registered.
 */
void pjsua_im_process_pager(int call_id, const pj_str_t *from,
                            const pj_str_t *to, pjsip_rx_data *rdata)
{
    pjsip_contact_hdr *contact_hdr;
    pj_str_t contact;
    pjsip_msg_body *body = rdata->msg_info.msg->body;

#if 0
    /* Ticket #693: allow incoming MESSAGE without message body */
    /* Body MUST have been checked before */
    pj_assert(body != NULL);
#endif

    /* Build remote contact from the Contact header, if present.
     * The printed URI is allocated from the transport pool, so it lives
     * only for the duration of this request.
     */
    contact_hdr = (pjsip_contact_hdr*)
                  pjsip_msg_find_hdr(rdata->msg_info.msg, PJSIP_H_CONTACT,
                                     NULL);
    if (contact_hdr && contact_hdr->uri) {
        contact.ptr = (char*) pj_pool_alloc(rdata->tp_info.pool,
                                            PJSIP_MAX_URL_SIZE);
        contact.slen = pjsip_uri_print(PJSIP_URI_IN_CONTACT_HDR,
                                       contact_hdr->uri, contact.ptr,
                                       PJSIP_MAX_URL_SIZE);
    } else {
        contact.slen = 0;
    }

    if (body && pj_stricmp(&body->content_type.type, &STR_MIME_APP)==0 &&
        pj_stricmp(&body->content_type.subtype, &STR_MIME_ISCOMPOSING)==0)
    {
        /* Expecting typing indication */
        pj_status_t status;
        pj_bool_t is_typing;

        status = pjsip_iscomposing_parse(rdata->tp_info.pool,
                                         (char*)body->data,
                                         body->len, &is_typing,
                                         NULL, NULL, NULL );
        if (status != PJ_SUCCESS) {
            pjsua_perror(THIS_FILE, "Invalid MESSAGE body", status);
            return;
        }

        if (pjsua_var.ua_cfg.cb.on_typing) {
            (*pjsua_var.ua_cfg.cb.on_typing)(call_id, from, to, &contact,
                                             is_typing);
        }

        if (pjsua_var.ua_cfg.cb.on_typing2) {
            pjsua_acc_id acc_id;

            /* Resolve the account: from the call when the MESSAGE arrived
             * inside a call, otherwise match the request to an account.
             */
            if (call_id == PJSUA_INVALID_ID) {
                acc_id = pjsua_acc_find_for_incoming(rdata);
            } else {
                pjsua_call *call = &pjsua_var.calls[call_id];
                acc_id = call->acc_id;
            }

            (*pjsua_var.ua_cfg.cb.on_typing2)(call_id, from, to, &contact,
                                              is_typing, rdata, acc_id);
        }

    } else {
        pj_str_t mime_type;
        char buf[256];
        pjsip_media_type *m;
        pj_str_t text_body;

        /* Save text body */
        if (body) {
            text_body.ptr = (char*)rdata->msg_info.msg->body->data;
            text_body.slen = rdata->msg_info.msg->body->len;

            /* Get mime type, rendered as "type/subtype" into a stack
             * buffer (truncated by snprintf if longer than 256).
             */
            m = &rdata->msg_info.msg->body->content_type;
            mime_type.ptr = buf;
            mime_type.slen = pj_ansi_snprintf(buf, sizeof(buf),
                                              "%.*s/%.*s",
                                              (int)m->type.slen,
                                              m->type.ptr,
                                              (int)m->subtype.slen,
                                              m->subtype.ptr);
            if (mime_type.slen < 1)
                mime_type.slen = 0;
        } else {
            /* Bodyless MESSAGE: report empty body and mime type. */
            text_body.ptr = mime_type.ptr = "";
            text_body.slen = mime_type.slen = 0;
        }

        if (pjsua_var.ua_cfg.cb.on_pager) {
            (*pjsua_var.ua_cfg.cb.on_pager)(call_id, from, to, &contact,
                                            &mime_type, &text_body);
        }

        if (pjsua_var.ua_cfg.cb.on_pager2) {
            pjsua_acc_id acc_id;

            if (call_id == PJSUA_INVALID_ID) {
                acc_id = pjsua_acc_find_for_incoming(rdata);
            } else {
                pjsua_call *call = &pjsua_var.calls[call_id];
                acc_id = call->acc_id;
            }

            (*pjsua_var.ua_cfg.cb.on_pager2)(call_id, from, to, &contact,
                                             &mime_type, &text_body,
                                             rdata, acc_id);
        }
    }
}
/*
 * pj_thread_register(..)
 *
 * Register an externally-created thread (e.g. a sound device thread) with
 * pjlib, using the caller-supplied descriptor buffer as the pj_thread_t
 * storage and recording it in thread-local storage.
 */
PJ_DEF(pj_status_t) pj_thread_register ( const char *cstr_thread_name,
                                         pj_thread_desc desc,
                                         pj_thread_t **ptr_thread)
{
#if PJ_HAS_THREADS
    char stack_ptr;
    pj_status_t rc;
    pj_thread_t *thread = (pj_thread_t *)desc;
    pj_str_t thread_name = pj_str((char*)cstr_thread_name);

    /* Size sanity check. */
    if (sizeof(pj_thread_desc) < sizeof(pj_thread_t)) {
        pj_assert(!"Not enough pj_thread_desc size!");
        return PJ_EBUG;
    }

    /* Warn if this thread has been registered before */
    if (pj_thread_local_get (thread_tls_id) != 0) {
        // 2006-02-26 bennylp:
        //  This wouldn't work in all cases!.
        //  If thread is created by external module (e.g. sound thread),
        //  thread may be reused while the pool used for the thread
        //  descriptor has been deleted by application.
        //*thread_ptr = (pj_thread_t*)pj_thread_local_get (thread_tls_id);
        //return PJ_SUCCESS;
        PJ_LOG(4,(THIS_FILE, "Info: possibly re-registering existing "
                             "thread"));
    }

    /* On the other hand, also warn if the thread descriptor buffer seem to
     * have been used to register other threads.
     */
    pj_assert(thread->signature1 != SIGNATURE1 ||
              thread->signature2 != SIGNATURE2 ||
              (thread->thread == pthread_self()));

    /* Initialize and set the thread entry. */
    pj_bzero(desc, sizeof(struct pj_thread_t));
    thread->thread = pthread_self();
    thread->signature1 = SIGNATURE1;
    thread->signature2 = SIGNATURE2;

    /* NOTE(review): the caller-supplied name is deliberately used as a
     * printf FORMAT string here, with the pthread id as argument, so a
     * name like "sound%p" expands to a unique name. Callers must not pass
     * untrusted strings containing conversion specifiers.
     */
    if(cstr_thread_name && pj_strlen(&thread_name) < sizeof(thread->obj_name)-1)
        pj_ansi_snprintf(thread->obj_name, sizeof(thread->obj_name),
                         cstr_thread_name, thread->thread);
    else
        pj_ansi_snprintf(thread->obj_name, sizeof(thread->obj_name),
                         "thr%p", (void*)thread->thread);

    rc = pj_thread_local_set(thread_tls_id, thread);
    if (rc != PJ_SUCCESS) {
        /* Wipe the descriptor so a failed registration leaves no stale
         * signatures behind.
         */
        pj_bzero(desc, sizeof(struct pj_thread_t));
        return rc;
    }

#if defined(PJ_OS_HAS_CHECK_STACK) && PJ_OS_HAS_CHECK_STACK!=0
    /* Seed stack-usage tracking with the current stack position. */
    thread->stk_start = &stack_ptr;
    thread->stk_size = 0xFFFFFFFFUL;
    thread->stk_max_usage = 0;
#else
    stack_ptr = '\0';
#endif

    *ptr_thread = thread;
    return PJ_SUCCESS;
#else
    /* No-threads build: just alias the descriptor. */
    pj_thread_t *thread = (pj_thread_t*)desc;
    *ptr_thread = thread;
    return PJ_SUCCESS;
#endif
}
/*
 * Set the "id" attribute of a PIDF tuple element, duplicating the new
 * value into the given pool.
 */
PJ_DEF(void) pjpidf_tuple_set_id(pj_pool_t *pool, pjpidf_tuple *t,
                                 const pj_str_t *id)
{
    pj_xml_attr *id_attr;

    /* The tuple is created with an "id" attribute, so lookup must succeed. */
    id_attr = pj_xml_find_attr(t, &ID, NULL);
    pj_assert(id_attr);

    /* Replace the attribute value with a pool-allocated copy. */
    pj_strdup(pool, &id_attr->value, id);
}
/*
 * Transaction callback for client registration (regc).
 *
 * Fix: several tokens were corrupted by HTML-entity mojibake —
 * "®c" in place of "&regc" (in the pjsip_auth_clt_reinit_req call, the
 * refresh timer setup, the schedule-timer call, and pj_gettimeofday) —
 * which does not compile; all restored to "&regc...".
 *
 * Handles final responses to REGISTER: retries with credentials on
 * 401/407, schedules auto re-registration on 2xx, and reports the result
 * to the application callback. The regc lock is released around every
 * callback invocation to avoid deadlock, and busy_ctr defers destruction
 * requested from within the callback until we unwind.
 */
static void tsx_callback(void *token, pjsip_event *event)
{
    pj_status_t status;
    pjsip_regc *regc = (pjsip_regc*) token;
    pjsip_transaction *tsx = event->body.tsx_state.tsx;

    /* Guard against destruction while we are inside this callback. */
    pj_atomic_inc(regc->busy_ctr);
    pj_lock_acquire(regc->lock);

    /* Decrement pending transaction counter. */
    pj_assert(regc->has_tsx);
    regc->has_tsx = PJ_FALSE;

    /* Add reference to the transport */
    if (tsx->transport != regc->last_transport) {
        if (regc->last_transport) {
            pjsip_transport_dec_ref(regc->last_transport);
            regc->last_transport = NULL;
        }
        if (tsx->transport) {
            regc->last_transport = tsx->transport;
            pjsip_transport_add_ref(regc->last_transport);
        }
    }

    /* Handle 401/407 challenge (even when _delete_flag is set) */
    if (tsx->status_code == PJSIP_SC_PROXY_AUTHENTICATION_REQUIRED ||
        tsx->status_code == PJSIP_SC_UNAUTHORIZED)
    {
        pjsip_rx_data *rdata = event->body.tsx_state.src.rdata;
        pjsip_tx_data *tdata;

        /* reset current op */
        regc->current_op = REGC_IDLE;

        /* Rebuild the request with credentials and resend it. */
        status = pjsip_auth_clt_reinit_req( &regc->auth_sess,
                                            rdata, tsx->last_tx, &tdata);
        if (status == PJ_SUCCESS) {
            status = pjsip_regc_send(regc, tdata);
        }

        if (status != PJ_SUCCESS) {
            /* Only call callback if application is still interested
             * in it.
             */
            if (regc->_delete_flag == 0) {
                /* Should be safe to release the lock temporarily.
                 * We do this to avoid deadlock.
                 */
                pj_lock_release(regc->lock);
                call_callback(regc, status, tsx->status_code,
                              &rdata->msg_info.msg->line.status.reason,
                              rdata, -1, 0, NULL);
                pj_lock_acquire(regc->lock);
            }
        }

    } else if (regc->_delete_flag) {

        /* User has called pjsip_regc_destroy(), so don't call callback.
         * This regc will be destroyed later in this function.
         */

        /* Just reset current op */
        regc->current_op = REGC_IDLE;

    } else {
        pjsip_rx_data *rdata;
        pj_int32_t expiration = NOEXP;
        unsigned contact_cnt = 0;
        pjsip_contact_hdr *contact[PJSIP_REGC_MAX_CONTACT];

        if (tsx->status_code/100 == 2) {

            rdata = event->body.tsx_state.src.rdata;

            /* Calculate expiration */
            expiration = calculate_response_expiration(regc, rdata,
                                                       &contact_cnt,
                                                       PJSIP_REGC_MAX_CONTACT,
                                                       contact);

            /* Mark operation as complete */
            regc->current_op = REGC_IDLE;

            /* Schedule next registration */
            if (regc->auto_reg && expiration > 0) {
                pj_time_val delay = { 0, 0};

                /* Refresh slightly before the registration expires, but
                 * never earlier than DELAY_BEFORE_REFRESH from now, and
                 * never later than the configured expiration.
                 */
                delay.sec = expiration - DELAY_BEFORE_REFRESH;
                if (regc->expires != PJSIP_REGC_EXPIRATION_NOT_SPECIFIED &&
                    delay.sec > (pj_int32_t)regc->expires)
                {
                    delay.sec = regc->expires;
                }
                if (delay.sec < DELAY_BEFORE_REFRESH)
                    delay.sec = DELAY_BEFORE_REFRESH;

                regc->timer.cb = &regc_refresh_timer_cb;
                regc->timer.id = REFRESH_TIMER;
                regc->timer.user_data = regc;
                pjsip_endpt_schedule_timer( regc->endpt, &regc->timer,
                                            &delay);
                pj_gettimeofday(&regc->last_reg);
                regc->next_reg = regc->last_reg;
                regc->next_reg.sec += delay.sec;
            }

        } else {
            /* Non-2xx: only have rdata when the event carries a received
             * message (e.g. not on transport error/timeout).
             */
            rdata = (event->body.tsx_state.type==PJSIP_EVENT_RX_MSG) ?
                        event->body.tsx_state.src.rdata : NULL;
        }

        /* Update registration */
        if (expiration==NOEXP) expiration=-1;
        regc->expires = expiration;

        /* Call callback. */
        /* Should be safe to release the lock temporarily.
         * We do this to avoid deadlock.
         */
        pj_lock_release(regc->lock);
        call_callback(regc, PJ_SUCCESS, tsx->status_code,
                      (rdata ? &rdata->msg_info.msg->line.status.reason :
                               pjsip_get_status_text(tsx->status_code)),
                      rdata, expiration, contact_cnt, contact);
        pj_lock_acquire(regc->lock);
    }

    pj_lock_release(regc->lock);

    /* Delete the record if user destroy regc during the callback. */
    if (pj_atomic_dec_and_get(regc->busy_ctr)==0 && regc->_delete_flag) {
        pjsip_regc_destroy(regc);
    }
}