static pj_status_t ioqueue_init_key( pj_pool_t *pool,
                                     pj_ioqueue_t *ioqueue,
                                     pj_ioqueue_key_t *key,
                                     pj_sock_t sock,
                                     pj_grp_lock_t *grp_lock,
                                     void *user_data,
                                     const pj_ioqueue_callback *cb)
{
    pj_status_t rc;
    int optlen;

    PJ_UNUSED_ARG(pool);

    key->ioqueue = ioqueue;
    key->fd = sock;
    key->user_data = user_data;
    pj_list_init(&key->read_list);
    pj_list_init(&key->write_list);
#if PJ_HAS_TCP
    pj_list_init(&key->accept_list);
    key->connecting = 0;
#endif

    /* Save callback. */
    pj_memcpy(&key->cb, cb, sizeof(pj_ioqueue_callback));

#if PJ_IOQUEUE_HAS_SAFE_UNREG
    /* Set initial reference count to 1 */
    pj_assert(key->ref_count == 0);
    ++key->ref_count;

    key->closing = 0;
#endif

    rc = pj_ioqueue_set_concurrency(key, ioqueue->default_concurrency);
    if (rc != PJ_SUCCESS)
        return rc;

    /* Get socket type. When socket type is datagram, some optimization
     * will be performed during send to allow parallel send operations.
     */
    optlen = sizeof(key->fd_type);
    rc = pj_sock_getsockopt(sock, pj_SOL_SOCKET(), pj_SO_TYPE(),
                            &key->fd_type, &optlen);
    if (rc != PJ_SUCCESS)
        key->fd_type = pj_SOCK_STREAM();

    /* Create mutex for the key. */
#if !PJ_IOQUEUE_HAS_SAFE_UNREG
    rc = pj_lock_create_simple_mutex(pool, NULL, &key->lock);
#endif
    if (rc != PJ_SUCCESS)
        return rc;

    /* Group lock */
    key->grp_lock = grp_lock;
    if (key->grp_lock) {
        pj_grp_lock_add_ref_dbg(key->grp_lock, "ioqueue", 0);
    }

    return PJ_SUCCESS;
}
/* * pj_ioqueue_create() */ PJ_DEF(pj_status_t) pj_ioqueue_create( pj_pool_t *pool, pj_size_t max_fd, pj_ioqueue_t **p_ioqueue) { pj_ioqueue_t *ioqueue; unsigned i; pj_status_t rc; PJ_UNUSED_ARG(max_fd); PJ_ASSERT_RETURN(pool && p_ioqueue, PJ_EINVAL); rc = sizeof(union operation_key); /* Check that sizeof(pj_ioqueue_op_key_t) makes sense. */ PJ_ASSERT_RETURN(sizeof(pj_ioqueue_op_key_t)-sizeof(void*) >= sizeof(union operation_key), PJ_EBUG); /* Create IOCP */ ioqueue = pj_pool_zalloc(pool, sizeof(*ioqueue)); ioqueue->iocp = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 0); if (ioqueue->iocp == NULL) return PJ_RETURN_OS_ERROR(GetLastError()); /* Create IOCP mutex */ rc = pj_lock_create_recursive_mutex(pool, NULL, &ioqueue->lock); if (rc != PJ_SUCCESS) { CloseHandle(ioqueue->iocp); return rc; } ioqueue->auto_delete_lock = PJ_TRUE; ioqueue->default_concurrency = PJ_IOQUEUE_DEFAULT_ALLOW_CONCURRENCY; #if PJ_IOQUEUE_HAS_SAFE_UNREG /* * Create and initialize key pools. */ pj_list_init(&ioqueue->active_list); pj_list_init(&ioqueue->free_list); pj_list_init(&ioqueue->closing_list); /* Preallocate keys according to max_fd setting, and put them * in free_list. */ for (i=0; i<max_fd; ++i) { pj_ioqueue_key_t *key; key = pj_pool_alloc(pool, sizeof(pj_ioqueue_key_t)); rc = pj_atomic_create(pool, 0, &key->ref_count); if (rc != PJ_SUCCESS) { key = ioqueue->free_list.next; while (key != &ioqueue->free_list) { pj_atomic_destroy(key->ref_count); pj_mutex_destroy(key->mutex); key = key->next; } CloseHandle(ioqueue->iocp); return rc; } rc = pj_mutex_create_recursive(pool, "ioqkey", &key->mutex); if (rc != PJ_SUCCESS) { pj_atomic_destroy(key->ref_count); key = ioqueue->free_list.next; while (key != &ioqueue->free_list) { pj_atomic_destroy(key->ref_count); pj_mutex_destroy(key->mutex); key = key->next; } CloseHandle(ioqueue->iocp); return rc; } pj_list_push_back(&ioqueue->free_list, key); } #endif *p_ioqueue = ioqueue; PJ_LOG(4, ("pjlib", "WinNT IOCP I/O Queue created (%p)", ioqueue)); return PJ_SUCCESS; }
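/*
 * Usage sketch (illustrative addition, not part of the library source):
 * create an I/O queue, register a UDP socket with a callback, and poll it
 * once, showing how pj_ioqueue_create() above is typically driven. Error
 * handling is minimal and the callback name is hypothetical.
 */
static void on_read_done_sketch(pj_ioqueue_key_t *key,
                                pj_ioqueue_op_key_t *op_key,
                                pj_ssize_t bytes_read)
{
    PJ_UNUSED_ARG(key);
    PJ_UNUSED_ARG(op_key);
    PJ_UNUSED_ARG(bytes_read);
    /* Consume the received data here. */
}

static pj_status_t ioqueue_usage_sketch(pj_pool_factory *pf)
{
    pj_pool_t *pool;
    pj_ioqueue_t *ioqueue;
    pj_ioqueue_key_t *key;
    pj_ioqueue_callback cb;
    pj_sock_t sock;
    pj_time_val timeout = {0, 10};  /* 10 ms */
    pj_status_t status;

    pool = pj_pool_create(pf, "ioq_sketch", 512, 512, NULL);

    /* max_fd sizes the preallocated key list when safe unregistration
     * is enabled; the IOCP backend otherwise ignores it.
     */
    status = pj_ioqueue_create(pool, 64, &ioqueue);
    if (status != PJ_SUCCESS)
        return status;

    status = pj_sock_socket(pj_AF_INET(), pj_SOCK_DGRAM(), 0, &sock);
    if (status != PJ_SUCCESS)
        return status;

    pj_bzero(&cb, sizeof(cb));
    cb.on_read_complete = &on_read_done_sketch;

    status = pj_ioqueue_register_sock(pool, ioqueue, sock, NULL, &cb, &key);
    if (status != PJ_SUCCESS)
        return status;

    /* Real applications loop here, typically from a worker thread. */
    pj_ioqueue_poll(ioqueue, &timeout);

    /* Unregistration also closes the socket in the backends shown here. */
    pj_ioqueue_unregister(key);
    pj_ioqueue_destroy(ioqueue);
    pj_pool_release(pool);
    return PJ_SUCCESS;
}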
/*
 * pj_ioqueue_unregister()
 */
PJ_DEF(pj_status_t) pj_ioqueue_unregister( pj_ioqueue_key_t *key )
{
    unsigned i;
    pj_bool_t has_lock;
    enum { RETRY = 10 };

    PJ_ASSERT_RETURN(key, PJ_EINVAL);

#if PJ_HAS_TCP
    if (key->connecting) {
        unsigned pos;
        pj_ioqueue_t *ioqueue;

        ioqueue = key->ioqueue;

        /* Erase from connecting_handles */
        pj_lock_acquire(ioqueue->lock);
        for (pos=0; pos < ioqueue->connecting_count; ++pos) {
            if (ioqueue->connecting_keys[pos] == key) {
                erase_connecting_socket(ioqueue, pos);
                break;
            }
        }
        key->connecting = 0;
        pj_lock_release(ioqueue->lock);
    }
#endif

#if PJ_IOQUEUE_HAS_SAFE_UNREG
    /* Mark key as closing before closing handle. */
    key->closing = 1;

    /* If concurrency is disabled, wait until the key has finished
     * processing the callback.
     */
    if (key->allow_concurrent == PJ_FALSE) {
        pj_mutex_lock(key->mutex);
        has_lock = PJ_TRUE;
    } else {
        has_lock = PJ_FALSE;
    }
#else
    PJ_UNUSED_ARG(has_lock);
#endif

    /* Close handle (the only way to disassociate handle from IOCP).
     * We also need to close the handle to make sure that no further events
     * will come to it.
     */
    /* Update 2008/07/18 (http://trac.pjsip.org/repos/ticket/575):
     *  - It seems that CloseHandle() in itself does not actually close
     *    the socket (i.e. it will still appear in "netstat" output). Also
     *    if we only use CloseHandle(), an "Invalid Handle" exception will
     *    be raised in WSACleanup().
     *  - MSDN documentation says that CloseHandle() must be called after
     *    the closesocket() call (see
     *    http://msdn.microsoft.com/en-us/library/ms724211(VS.85).aspx).
     *    But it turns out that this raises an "Invalid Handle" exception
     *    in debug mode.
     * So because of this, we replaced CloseHandle() with closesocket()
     * instead. This was tested on WinXP SP2.
     */
    //CloseHandle(key->hnd);
    pj_sock_close((pj_sock_t)key->hnd);

    /* Reset callbacks */
    key->cb.on_accept_complete = NULL;
    key->cb.on_connect_complete = NULL;
    key->cb.on_read_complete = NULL;
    key->cb.on_write_complete = NULL;

#if PJ_IOQUEUE_HAS_SAFE_UNREG
    /* Even after the handle is closed, I suspect that IOCP may still try to
     * do something with the handle, causing memory corruption when pool
     * debugging is enabled.
     *
     * Forcing a context switch seems to have fixed that, but this is quite
     * an ugly solution..
     *
     * Update 2008/02/13:
     *  This should not happen if concurrency is disallowed for the key.
     *  So at least the application has a solution for this (i.e. by
     *  disallowing concurrency in the key).
     */
    //This will loop forever if unregistration is done on the callback.
    //Doing this with RETRY I think should solve the IOCP setting the
    //socket signalled, without causing the deadlock.
    //while (pj_atomic_get(key->ref_count) != 1)
    //    pj_thread_sleep(0);
    for (i=0; pj_atomic_get(key->ref_count) != 1 && i<RETRY; ++i)
        pj_thread_sleep(0);

    /* Decrement reference counter to destroy the key. */
    decrement_counter(key);

    if (has_lock)
        pj_mutex_unlock(key->mutex);
#endif

    return PJ_SUCCESS;
}
/* * pj_thread_resume() */ PJ_DEF(pj_status_t) pj_thread_resume(pj_thread_t *p) { PJ_UNUSED_ARG(p); return PJ_EINVALIDOP; }
static pj_status_t pj_vpx_codec_decode(pjmedia_vid_codec *codec,
                                       pj_size_t pkt_count,
                                       pjmedia_frame packets[],
                                       unsigned out_size,
                                       pjmedia_frame *output)
{
    vpx_private *vpx = (vpx_private*) codec->codec_data;
    vpx_image_t *img;
    vpx_codec_iter_t iter;
    int i, res;

    PJ_ASSERT_RETURN(codec && pkt_count > 0 && packets && output,
                     PJ_EINVAL);

    vpx->dec_frame_len = 0;
    /* TODO : packet parsing is absolutely incomplete here !!!!
     * We should manage extensions, partitions etc
     */
    for (i = 0; i < pkt_count; ++i) {
        pj_uint8_t *data;
        pj_uint8_t extended_bit, s_bit, partition_id;
        unsigned extension_len = 0;
        unsigned payload_size = packets[i].size;

        if (payload_size == 0) {
            continue;
        }

        data = packets[i].buf;
        extended_bit = (*data) & 0x80;
        s_bit = (*data) & 0x20;
        partition_id = (*data) & 0x1F;
        PJ_UNUSED_ARG(s_bit);
        PJ_UNUSED_ARG(partition_id);

        /* First octet is for */
        /* |X|R|N|S|PartID | */
        if (extended_bit) {
            pj_uint8_t i_bit, l_bit, t_bit, k_bit;
            (data)++;
            extension_len++;
            i_bit = (*data) & 0x80;
            l_bit = (*data) & 0x40;
            t_bit = (*data) & 0x20;
            k_bit = (*data) & 0x10;
            if (payload_size <= 1) {
                PJ_LOG(4, (THIS_FILE, "Error decoding VP8 extended attributes"));
                continue;
            }
            /* We have extension in octet 2 */
            /* |I|L|T|K| RSV | */
            if (i_bit) {
                data++;
                if (payload_size <= 2) {
                    PJ_LOG(4, (THIS_FILE,
                               "Error decoding VP8 extended picture ID attribute"));
                    continue;
                }
                /* I bit is set: check the M flag for a long picture ID */
                if ((*data) & 0x80) {
                    data++;
                }
            }
            if (l_bit) {
                data++;
            }
            if (t_bit || k_bit) {
                data++;
            }
        }
        data++;

        payload_size = packets[i].size - (data - (pj_uint8_t*)packets[i].buf);

        //PJ_LOG(4, (THIS_FILE, "Unpack RTP %d size %d, start %d", i,
        //           packets[i].size, s[0] & 0x10));

        if ((vpx->dec_frame_len + payload_size) < vpx->dec_buf_size) {
            pj_memcpy(vpx->dec_buf + vpx->dec_frame_len, data, payload_size);
            vpx->dec_frame_len += payload_size;
        } else {
            PJ_LOG(1, (THIS_FILE, "Buffer is too small"));
        }
    }

    if (vpx->dec_frame_len == 0) {
        PJ_LOG(1, (THIS_FILE, "No content for these packets"));
        return PJ_SUCCESS;
    }

    res = vpx_codec_decode(&vpx->decoder, vpx->dec_buf, vpx->dec_frame_len,
                           NULL, VPX_DL_REALTIME);
    switch (res) {
    case VPX_CODEC_UNSUP_BITSTREAM:
    case VPX_CODEC_UNSUP_FEATURE:
    case VPX_CODEC_CORRUPT_FRAME:
        /* Fatal errors to the stream, request a keyframe to see if we can
         * recover.
         */
        PJ_LOG(4, (THIS_FILE, "Fatal error decoding stream: (%d) %s", res,
                   vpx_codec_err_to_string(res)));
        {
            pjmedia_event event;
            pjmedia_event_init(&event, PJMEDIA_EVENT_KEYFRAME_MISSING,
                               NULL, codec);
            pjmedia_event_publish(NULL, codec, &event, 0);
        }
        return PJMEDIA_CODEC_EBADBITSTREAM;
    case VPX_CODEC_OK:
        break;
    default:
        PJ_LOG(4, (THIS_FILE, "Failed to decode packets: (%d) %s", res,
                   vpx_codec_err_to_string(res)));
        return PJMEDIA_ERROR;
    }

    iter = NULL;
    for (;;) {
        pj_status_t status;
        img = vpx_codec_get_frame(&vpx->decoder, &iter);
        if (img == NULL)
            break;
        status = pj_vpx_codec_decode_whole(codec, img, &packets[0].timestamp,
                                           out_size, output);
        if (status != PJ_SUCCESS) {
            PJ_LOG(4, (THIS_FILE, "Failed to decode frame"));
            /* XXX stop processing and request keyframe? */
        }
    }
    return PJ_SUCCESS;
}
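/*
 * Usage sketch (illustrative addition, not part of the codec source): feed
 * one frame's worth of depacketized RTP payloads to the decoder through the
 * generic video codec operation table, which is assumed to dispatch to
 * pj_vpx_codec_decode() above. The codec is assumed to have been created
 * and opened through pjmedia_vid_codec_mgr; buffer sizes are illustrative.
 */
static pj_status_t vpx_decode_sketch(pjmedia_vid_codec *codec,
                                     pjmedia_frame packets[],
                                     pj_size_t pkt_count,
                                     void *dec_buf, unsigned dec_buf_size)
{
    pjmedia_frame out_frame;

    pj_bzero(&out_frame, sizeof(out_frame));
    out_frame.buf = dec_buf;
    out_frame.size = dec_buf_size;

    /* Dispatch through the op table registered by the codec factory. */
    return (*codec->op->decode)(codec, pkt_count, packets,
                                dec_buf_size, &out_frame);
}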
TInt CPjTimeoutTimer::RunError(TInt aError) { PJ_UNUSED_ARG(aError); return KErrNone; }
/* * Get the highest priority value available on this system. */ PJ_DEF(int) pj_thread_get_prio_max(pj_thread_t *thread) { PJ_UNUSED_ARG(thread); return 1; }
/* Compare two tel: URI */ static int tel_uri_cmp( pjsip_uri_context_e context, const pjsip_tel_uri *url1, const pjsip_tel_uri *url2) { int result; PJ_UNUSED_ARG(context); /* Scheme must match. */ if (url1->vptr != url2->vptr) return -1; /* Compare number. */ result = pjsip_tel_nb_cmp(&url1->number, &url2->number); if (result != 0) return result; /* Compare phone-context as hostname or as as global nb. */ if (url1->context.slen) { if (*url1->context.ptr != '+') result = pj_stricmp(&url1->context, &url2->context); else result = pjsip_tel_nb_cmp(&url1->context, &url2->context); if (result != 0) return result; } else if (url2->context.slen) return -1; /* Compare extension. */ if (url1->ext_param.slen) { result = pjsip_tel_nb_cmp(&url1->ext_param, &url2->ext_param); if (result != 0) return result; } /* Compare isub bytes by bytes. */ if (url1->isub_param.slen) { result = pj_stricmp(&url1->isub_param, &url2->isub_param); if (result != 0) return result; } /* Other parameters are compared regardless of the order. * If one URI has parameter not found in the other URI, the URIs are * not equal. */ if (url1->other_param.next != &url1->other_param) { const pjsip_param *p1, *p2; int cnt1 = 0, cnt2 = 0; p1 = url1->other_param.next; while (p1 != &url1->other_param) { p2 = pjsip_param_cfind(&url2->other_param, &p1->name); if (!p2 ) return 1; result = pj_stricmp(&p1->value, &p2->value); if (result != 0) return result; p1 = p1->next; ++cnt1; } p2 = url2->other_param.next; while (p2 != &url2->other_param) ++cnt2, p2 = p2->next; if (cnt1 < cnt2) return -1; else if (cnt1 > cnt2) return 1; } else if (url2->other_param.next != &url2->other_param) return -1; /* Equal. */ return 0; }
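/*
 * Usage sketch (illustrative addition, not part of the library source):
 * tel_uri_cmp() above is an internal vtable entry; applications normally
 * compare URIs through pjsip_uri_cmp(). This assumes the SIP endpoint (and
 * therefore the parser) has already been initialized; the URI strings are
 * illustrative only. Visual separators are ignored by the tel number
 * comparison, so the two global numbers below should compare as equal.
 */
static int tel_uri_cmp_sketch(pj_pool_t *pool)
{
    char buf1[] = "tel:+1-212-555-0101";
    char buf2[] = "tel:+12125550101";
    pjsip_uri *uri1, *uri2;

    uri1 = pjsip_parse_uri(pool, buf1, pj_ansi_strlen(buf1), 0);
    uri2 = pjsip_parse_uri(pool, buf2, pj_ansi_strlen(buf2), 0);
    if (!uri1 || !uri2)
        return -1;

    /* Returns zero when the URIs are considered equal. */
    return pjsip_uri_cmp(PJSIP_URI_IN_REQ_URI, uri1, uri2);
}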
PJ_DEF(pj_status_t) pj_file_open( pj_pool_t *pool, const char *pathname, unsigned flags, pj_oshandle_t *fd) { PJ_DECL_UNICODE_TEMP_BUF(wpathname,256) HANDLE hFile; DWORD dwDesiredAccess = 0; DWORD dwShareMode = 0; DWORD dwCreationDisposition = 0; DWORD dwFlagsAndAttributes = 0; PJ_UNUSED_ARG(pool); PJ_ASSERT_RETURN(pathname!=NULL, PJ_EINVAL); if ((flags & PJ_O_WRONLY) == PJ_O_WRONLY) { dwDesiredAccess |= GENERIC_WRITE; if ((flags & PJ_O_APPEND) == PJ_O_APPEND) { dwDesiredAccess |= FILE_APPEND_DATA; dwCreationDisposition |= OPEN_ALWAYS; } else { dwDesiredAccess &= ~(FILE_APPEND_DATA); dwCreationDisposition |= CREATE_ALWAYS; } } if ((flags & PJ_O_RDONLY) == PJ_O_RDONLY) { dwDesiredAccess |= GENERIC_READ; if (flags == PJ_O_RDONLY) dwCreationDisposition |= OPEN_EXISTING; } if (dwDesiredAccess == 0) { pj_assert(!"Invalid file open flags"); return PJ_EINVAL; } dwShareMode = FILE_SHARE_READ | FILE_SHARE_WRITE; dwFlagsAndAttributes = FILE_ATTRIBUTE_NORMAL; hFile = CreateFile(PJ_STRING_TO_NATIVE(pathname,wpathname,sizeof(wpathname)), dwDesiredAccess, dwShareMode, NULL, dwCreationDisposition, dwFlagsAndAttributes, NULL); if (hFile == INVALID_HANDLE_VALUE) { *fd = 0; return PJ_RETURN_OS_ERROR(GetLastError()); } if ((flags & PJ_O_APPEND) == PJ_O_APPEND) { pj_status_t status; status = pj_file_setpos(hFile, 0, PJ_SEEK_END); if (status != PJ_SUCCESS) { pj_file_close(hFile); return status; } } *fd = hFile; return PJ_SUCCESS; }
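/*
 * Usage sketch (illustrative addition, not part of the library source):
 * open a log file in append mode with pj_file_open() above, write one line
 * and close it. The path name is illustrative only.
 */
static pj_status_t file_open_sketch(pj_pool_t *pool)
{
    pj_oshandle_t fd;
    const char *msg = "application started\n";
    pj_ssize_t size = (pj_ssize_t)pj_ansi_strlen(msg);
    pj_status_t status;

    /* PJ_O_APPEND keeps existing content and positions writes at EOF. */
    status = pj_file_open(pool, "app.log", PJ_O_WRONLY | PJ_O_APPEND, &fd);
    if (status != PJ_SUCCESS)
        return status;

    status = pj_file_write(fd, msg, &size);
    pj_file_close(fd);
    return status;
}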
/* * Destroy the transmit data. */ PJ_DEF(void) pj_stun_msg_destroy_tdata( pj_stun_session *sess, pj_stun_tx_data *tdata) { PJ_UNUSED_ARG(sess); destroy_tdata(tdata, PJ_FALSE); }
static const pj_str_t *tel_uri_get_scheme( const pjsip_tel_uri *uri ) { PJ_UNUSED_ARG(uri); return &pjsip_parser_const()->pjsip_TEL_STR; }
/* Create m=video SDP media line */
PJ_DEF(pj_status_t) pjmedia_endpt_create_video_sdp(pjmedia_endpt *endpt,
                                                   pj_pool_t *pool,
                                                   const pjmedia_sock_info *si,
                                                   unsigned options,
                                                   pjmedia_sdp_media **p_m)
{
    const pj_str_t STR_VIDEO = { "video", 5 };
    pjmedia_sdp_media *m;
    pjmedia_vid_codec_info codec_info[PJMEDIA_VID_CODEC_MGR_MAX_CODECS];
    unsigned codec_prio[PJMEDIA_VID_CODEC_MGR_MAX_CODECS];
    pjmedia_sdp_attr *attr;
    unsigned cnt, i;
    unsigned max_bitrate = 0;
    pj_status_t status;

    PJ_UNUSED_ARG(options);

    /* Make sure video codec manager is instantiated */
    if (!pjmedia_vid_codec_mgr_instance())
        pjmedia_vid_codec_mgr_create(endpt->pool, NULL);

    /* Create and init basic SDP media */
    m = PJ_POOL_ZALLOC_T(pool, pjmedia_sdp_media);
    status = init_sdp_media(m, pool, &STR_VIDEO, si);
    if (status != PJ_SUCCESS)
        return status;

    cnt = PJ_ARRAY_SIZE(codec_info);
    status = pjmedia_vid_codec_mgr_enum_codecs(NULL, &cnt,
                                               codec_info, codec_prio);

    /* Check that there are not too many codecs */
    PJ_ASSERT_RETURN(0 <= PJMEDIA_MAX_SDP_FMT, PJ_ETOOMANY);

    /* Add format, rtpmap, and fmtp (when applicable) for each codec */
    for (i=0; i<cnt; ++i) {
        pjmedia_sdp_rtpmap rtpmap;
        pjmedia_vid_codec_param codec_param;
        pj_str_t *fmt;
        pjmedia_video_format_detail *vfd;

        pj_bzero(&rtpmap, sizeof(rtpmap));

        if (codec_prio[i] == PJMEDIA_CODEC_PRIO_DISABLED)
            break;

        if (i > PJMEDIA_MAX_SDP_FMT) {
            /* Too many codecs, perhaps it is better to tell application by
             * returning appropriate status code.
             */
            PJ_PERROR(3,(THIS_FILE, PJ_ETOOMANY,
                         "Skipping some video codecs"));
            break;
        }

        /* Must support RTP packetization and bidirectional */
        if ((codec_info[i].packings & PJMEDIA_VID_PACKING_PACKETS) == 0 ||
            codec_info[i].dir != PJMEDIA_DIR_ENCODING_DECODING)
        {
            continue;
        }

        pjmedia_vid_codec_mgr_get_default_param(NULL, &codec_info[i],
                                                &codec_param);

        fmt = &m->desc.fmt[m->desc.fmt_count++];
        fmt->ptr = (char*) pj_pool_alloc(pool, 8);
        fmt->slen = pj_utoa(codec_info[i].pt, fmt->ptr);
        rtpmap.pt = *fmt;

        /* Encoding name */
        rtpmap.enc_name = codec_info[i].encoding_name;

        /* Clock rate */
        rtpmap.clock_rate = codec_info[i].clock_rate;

        if (codec_info[i].pt >= 96 || pjmedia_add_rtpmap_for_static_pt) {
            pjmedia_sdp_rtpmap_to_attr(pool, &rtpmap, &attr);
            m->attr[m->attr_count++] = attr;
        }

        /* Add fmtp params */
        if (codec_param.dec_fmtp.cnt > 0) {
            enum { MAX_FMTP_STR_LEN = 160 };
            char buf[MAX_FMTP_STR_LEN];
            unsigned buf_len = 0, j;
            pjmedia_codec_fmtp *dec_fmtp = &codec_param.dec_fmtp;

            /* Print codec PT */
            buf_len += pj_ansi_snprintf(buf, MAX_FMTP_STR_LEN - buf_len,
                                        "%d", codec_info[i].pt);

            for (j = 0; j < dec_fmtp->cnt; ++j) {
                pj_size_t test_len = 2;

                /* Check if buf still available */
                test_len = dec_fmtp->param[j].val.slen +
                           dec_fmtp->param[j].name.slen + 2;
                if (test_len + buf_len >= MAX_FMTP_STR_LEN)
                    return PJ_ETOOBIG;

                /* Print delimiter */
                buf_len += pj_ansi_snprintf(&buf[buf_len],
                                            MAX_FMTP_STR_LEN - buf_len,
                                            (j == 0?" ":";"));

                /* Print an fmtp param */
                if (dec_fmtp->param[j].name.slen)
                    buf_len += pj_ansi_snprintf(
                                            &buf[buf_len],
                                            MAX_FMTP_STR_LEN - buf_len,
                                            "%.*s=%.*s",
                                            (int)dec_fmtp->param[j].name.slen,
                                            dec_fmtp->param[j].name.ptr,
                                            (int)dec_fmtp->param[j].val.slen,
                                            dec_fmtp->param[j].val.ptr);
                else
                    buf_len += pj_ansi_snprintf(&buf[buf_len],
                                            MAX_FMTP_STR_LEN - buf_len,
                                            "%.*s",
                                            (int)dec_fmtp->param[j].val.slen,
                                            dec_fmtp->param[j].val.ptr);
            }

            attr = PJ_POOL_ZALLOC_T(pool, pjmedia_sdp_attr);

            attr->name = pj_str("fmtp");
            attr->value = pj_strdup3(pool, buf);
            m->attr[m->attr_count++] = attr;
        }

        /* Find maximum bitrate in this media */
        vfd = pjmedia_format_get_video_format_detail(&codec_param.enc_fmt,
                                                     PJ_TRUE);
        if (vfd && max_bitrate < vfd->max_bps)
            max_bitrate = vfd->max_bps;
    }

    /* Put bandwidth info in media level using bandwidth modifier "TIAS"
     * (RFC3890).
     */
    if (max_bitrate && pjmedia_add_bandwidth_tias_in_sdp) {
        const pj_str_t STR_BANDW_MODIFIER = { "TIAS", 4 };
        pjmedia_sdp_bandw *b;

        b = PJ_POOL_ALLOC_T(pool, pjmedia_sdp_bandw);
        b->modifier = STR_BANDW_MODIFIER;
        b->value = max_bitrate;
        m->bandw[m->bandw_count++] = b;
    }

    *p_m = m;
    return PJ_SUCCESS;
}
/* Create m=audio SDP media line */
PJ_DEF(pj_status_t) pjmedia_endpt_create_audio_sdp(pjmedia_endpt *endpt,
                                                   pj_pool_t *pool,
                                                   const pjmedia_sock_info *si,
                                                   unsigned options,
                                                   pjmedia_sdp_media **p_m)
{
    const pj_str_t STR_AUDIO = { "audio", 5 };
    pjmedia_sdp_media *m;
    pjmedia_sdp_attr *attr;
    unsigned i;
    unsigned max_bitrate = 0;
    pj_status_t status;

    PJ_UNUSED_ARG(options);

    /* Check that there are not too many codecs */
    PJ_ASSERT_RETURN(endpt->codec_mgr.codec_cnt <= PJMEDIA_MAX_SDP_FMT,
                     PJ_ETOOMANY);

    /* Create and init basic SDP media */
    m = PJ_POOL_ZALLOC_T(pool, pjmedia_sdp_media);
    status = init_sdp_media(m, pool, &STR_AUDIO, si);
    if (status != PJ_SUCCESS)
        return status;

    /* Add format, rtpmap, and fmtp (when applicable) for each codec */
    for (i=0; i<endpt->codec_mgr.codec_cnt; ++i) {

        pjmedia_codec_info *codec_info;
        pjmedia_sdp_rtpmap rtpmap;
        char tmp_param[3];
        pjmedia_codec_param codec_param;
        pj_str_t *fmt;

        if (endpt->codec_mgr.codec_desc[i].prio == PJMEDIA_CODEC_PRIO_DISABLED)
            break;

        codec_info = &endpt->codec_mgr.codec_desc[i].info;
        pjmedia_codec_mgr_get_default_param(&endpt->codec_mgr, codec_info,
                                            &codec_param);
        fmt = &m->desc.fmt[m->desc.fmt_count++];

        fmt->ptr = (char*) pj_pool_alloc(pool, 8);
        fmt->slen = pj_utoa(codec_info->pt, fmt->ptr);

        rtpmap.pt = *fmt;
        rtpmap.enc_name = codec_info->encoding_name;

#if defined(PJMEDIA_HANDLE_G722_MPEG_BUG) && (PJMEDIA_HANDLE_G722_MPEG_BUG != 0)
        if (codec_info->pt == PJMEDIA_RTP_PT_G722)
            rtpmap.clock_rate = 8000;
        else
            rtpmap.clock_rate = codec_info->clock_rate;
#else
        rtpmap.clock_rate = codec_info->clock_rate;
#endif

        /* For audio codecs, the rtpmap parameter denotes the number
         * of channels, which can be omitted if the value is 1.
         */
        if (codec_info->type == PJMEDIA_TYPE_AUDIO &&
            codec_info->channel_cnt > 1)
        {
            /* Can only support one digit channel count */
            pj_assert(codec_info->channel_cnt < 10);

            tmp_param[0] = (char)('0' + codec_info->channel_cnt);

            rtpmap.param.ptr = tmp_param;
            rtpmap.param.slen = 1;

        } else {
            rtpmap.param.ptr = "";
            rtpmap.param.slen = 0;
        }

        if (codec_info->pt >= 96 || pjmedia_add_rtpmap_for_static_pt) {
            pjmedia_sdp_rtpmap_to_attr(pool, &rtpmap, &attr);
            m->attr[m->attr_count++] = attr;
        }

        /* Add fmtp params */
        if (codec_param.setting.dec_fmtp.cnt > 0) {
            enum { MAX_FMTP_STR_LEN = 160 };
            char buf[MAX_FMTP_STR_LEN];
            unsigned buf_len = 0, ii;
            pjmedia_codec_fmtp *dec_fmtp = &codec_param.setting.dec_fmtp;

            /* Print codec PT */
            buf_len += pj_ansi_snprintf(buf, MAX_FMTP_STR_LEN - buf_len,
                                        "%d", codec_info->pt);

            for (ii = 0; ii < dec_fmtp->cnt; ++ii) {
                pj_size_t test_len = 2;

                /* Check if buf still available */
                test_len = dec_fmtp->param[ii].val.slen +
                           dec_fmtp->param[ii].name.slen + 2;
                if (test_len + buf_len >= MAX_FMTP_STR_LEN)
                    return PJ_ETOOBIG;

                /* Print delimiter */
                buf_len += pj_ansi_snprintf(&buf[buf_len],
                                            MAX_FMTP_STR_LEN - buf_len,
                                            (ii == 0?" ":";"));

                /* Print an fmtp param */
                if (dec_fmtp->param[ii].name.slen)
                    buf_len += pj_ansi_snprintf(
                                            &buf[buf_len],
                                            MAX_FMTP_STR_LEN - buf_len,
                                            "%.*s=%.*s",
                                            (int)dec_fmtp->param[ii].name.slen,
                                            dec_fmtp->param[ii].name.ptr,
                                            (int)dec_fmtp->param[ii].val.slen,
                                            dec_fmtp->param[ii].val.ptr);
                else
                    buf_len += pj_ansi_snprintf(&buf[buf_len],
                                            MAX_FMTP_STR_LEN - buf_len,
                                            "%.*s",
                                            (int)dec_fmtp->param[ii].val.slen,
                                            dec_fmtp->param[ii].val.ptr);
            }

            attr = PJ_POOL_ZALLOC_T(pool, pjmedia_sdp_attr);

            attr->name = pj_str("fmtp");
            attr->value = pj_strdup3(pool, buf);
            m->attr[m->attr_count++] = attr;
        }

        /* Find maximum bitrate in this media */
        if (max_bitrate < codec_param.info.max_bps)
            max_bitrate = codec_param.info.max_bps;
    }

#if defined(PJMEDIA_RTP_PT_TELEPHONE_EVENTS) && \
    PJMEDIA_RTP_PT_TELEPHONE_EVENTS != 0
    /*
     * Add support for telephony events
     */
    if (endpt->has_telephone_event) {
        m->desc.fmt[m->desc.fmt_count++] =
            pj_str(PJMEDIA_RTP_PT_TELEPHONE_EVENTS_STR);

        /* Add rtpmap. */
        attr = PJ_POOL_ZALLOC_T(pool, pjmedia_sdp_attr);
        attr->name = pj_str("rtpmap");
        attr->value = pj_str(PJMEDIA_RTP_PT_TELEPHONE_EVENTS_STR
                             " telephone-event/8000");
        m->attr[m->attr_count++] = attr;

        /* Add fmtp */
        attr = PJ_POOL_ZALLOC_T(pool, pjmedia_sdp_attr);
        attr->name = pj_str("fmtp");
#if defined(PJMEDIA_HAS_DTMF_FLASH) && PJMEDIA_HAS_DTMF_FLASH != 0
        attr->value = pj_str(PJMEDIA_RTP_PT_TELEPHONE_EVENTS_STR " 0-16");
#else
        attr->value = pj_str(PJMEDIA_RTP_PT_TELEPHONE_EVENTS_STR " 0-15");
#endif
        m->attr[m->attr_count++] = attr;
    }
#endif

    /* Put bandwidth info in media level using bandwidth modifier "TIAS"
     * (RFC3890).
     */
    if (max_bitrate && pjmedia_add_bandwidth_tias_in_sdp) {
        const pj_str_t STR_BANDW_MODIFIER = { "TIAS", 4 };
        pjmedia_sdp_bandw *b;

        b = PJ_POOL_ALLOC_T(pool, pjmedia_sdp_bandw);
        b->modifier = STR_BANDW_MODIFIER;
        b->value = max_bitrate;
        m->bandw[m->bandw_count++] = b;
    }

    *p_m = m;
    return PJ_SUCCESS;
}
/*
 * pj_ioqueue_sendto()
 *
 * Start asynchronous write() to the descriptor.
 */
PJ_DEF(pj_status_t) pj_ioqueue_sendto( pj_ioqueue_key_t *key,
                                       pj_ioqueue_op_key_t *op_key,
                                       const void *data,
                                       pj_ssize_t *length,
                                       pj_uint32_t flags,
                                       const pj_sockaddr_t *addr,
                                       int addrlen)
{
    struct write_operation *write_op;
    unsigned retry;
    pj_bool_t restart_retry = PJ_FALSE;
    pj_status_t status;
    pj_ssize_t sent;

    PJ_ASSERT_RETURN(key && op_key && data && length, PJ_EINVAL);
    PJ_CHECK_STACK();

#if defined(PJ_IPHONE_OS_HAS_MULTITASKING_SUPPORT) && \
    PJ_IPHONE_OS_HAS_MULTITASKING_SUPPORT!=0
retry_on_restart:
#else
    PJ_UNUSED_ARG(restart_retry);
#endif
    /* Check if key is closing. */
    if (IS_CLOSING(key))
        return PJ_ECANCELLED;

    /* We cannot use PJ_IOQUEUE_ALWAYS_ASYNC for socket write */
    flags &= ~(PJ_IOQUEUE_ALWAYS_ASYNC);

    /* Fast track:
     *   Try to send data immediately, only if there's no pending write!
     * Note:
     *  We are speculating that the list is empty here without properly
     *  acquiring ioqueue's mutex first. This is intentional, to maximize
     *  performance via parallelism.
     *
     *  This should be safe, because:
     *   - by convention, we require caller to make sure that the
     *     key is not unregistered while other threads are invoking
     *     an operation on the same key.
     *   - pj_list_empty() is safe to be invoked by multiple threads,
     *     even when other threads are modifying the list.
     */
    if (pj_list_empty(&key->write_list)) {
        /*
         * See if data can be sent immediately.
         */
        sent = *length;
        status = pj_sock_sendto(key->fd, data, &sent, flags, addr, addrlen);
        if (status == PJ_SUCCESS) {
            /* Success! */
            *length = sent;
            return PJ_SUCCESS;
        } else {
            /* If error is not EWOULDBLOCK (or EAGAIN on Linux), report
             * the error to caller.
             */
            if (status != PJ_STATUS_FROM_OS(PJ_BLOCKING_ERROR_VAL)) {
#if defined(PJ_IPHONE_OS_HAS_MULTITASKING_SUPPORT) && \
    PJ_IPHONE_OS_HAS_MULTITASKING_SUPPORT!=0
                /* Special treatment for dead UDP sockets here, see ticket #1107 */
                if (status==PJ_STATUS_FROM_OS(EPIPE) && !IS_CLOSING(key) &&
                    key->fd_type==pj_SOCK_DGRAM() && !restart_retry)
                {
                    PJ_PERROR(4,(THIS_FILE, status,
                                 "Send error for socket %d, retrying",
                                 key->fd));
                    replace_udp_sock(key);
                    restart_retry = PJ_TRUE;
                    goto retry_on_restart;
                }
#endif
                return status;
            }
        }
    }

    /*
     * Check that address storage can hold the address parameter.
     */
    PJ_ASSERT_RETURN(addrlen <= (int)sizeof(pj_sockaddr_in), PJ_EBUG);

    /*
     * Schedule asynchronous send.
     */
    write_op = (struct write_operation*)op_key;

    /* Spin if write_op has pending operation */
    for (retry=0; write_op->op != 0 && retry<PENDING_RETRY; ++retry)
        pj_thread_sleep(0);

    /* Last chance */
    if (write_op->op) {
        /* Unable to send packet because there is already a pending write on
         * the write_op. We could not put the operation into the write_op
         * because write_op already contains a pending operation! And
         * we could not send the packet directly with sendto() either,
         * because that would break the ordering of the packets. So we can
         * only return an error here.
         *
         * This could happen for example in multithreaded programs, where
         * polling is done by one thread while other threads do only the
         * sending. If the polling thread runs at a lower priority than the
         * sending threads, then it's possible that the pending write flag
         * is not cleared in time, because clearing is only done during
         * polling.
         *
         * The application should specify multiple write operation keys in
         * situations like this.
         */
        //pj_assert(!"ioqueue: there is pending operation on this key!");
        return PJ_EBUSY;
    }

    write_op->op = PJ_IOQUEUE_OP_SEND_TO;
    write_op->buf = (char*)data;
    write_op->size = *length;
    write_op->written = 0;
    write_op->flags = flags;
    pj_memcpy(&write_op->rmt_addr, addr, addrlen);
    write_op->rmt_addrlen = addrlen;

    pj_ioqueue_lock_key(key);
    /* Check again. Handle may have been closed after the previous check
     * in multithreaded app. If we add bad handle to the set it will
     * corrupt the ioqueue set. See #913
     */
    if (IS_CLOSING(key)) {
        pj_ioqueue_unlock_key(key);
        return PJ_ECANCELLED;
    }
    pj_list_insert_before(&key->write_list, write_op);
    ioqueue_add_to_set(key->ioqueue, key, WRITEABLE_EVENT);
    pj_ioqueue_unlock_key(key);

    return PJ_EPENDING;
}
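/*
 * Usage sketch (illustrative addition, not part of the library source):
 * send a UDP datagram on a registered key and handle the PJ_EPENDING case
 * described above. When the send is queued, the buffer must remain valid
 * until on_write_complete() fires; the destination address is illustrative.
 */
static pj_status_t sendto_sketch(pj_ioqueue_key_t *key,
                                 pj_ioqueue_op_key_t *op_key,
                                 const char *pkt, pj_ssize_t pkt_len)
{
    pj_sockaddr_in dst;
    pj_str_t dst_ip = pj_str("192.0.2.10");
    pj_ssize_t size = pkt_len;
    pj_status_t status;

    pj_ioqueue_op_key_init(op_key, sizeof(*op_key));
    pj_sockaddr_in_init(&dst, &dst_ip, 5000);

    status = pj_ioqueue_sendto(key, op_key, pkt, &size, 0,
                               &dst, sizeof(dst));
    if (status == PJ_SUCCESS) {
        /* Sent immediately; "size" holds the number of bytes written. */
    } else if (status == PJ_EPENDING) {
        /* Queued; completion will be reported via on_write_complete(). */
    }
    return status;
}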
/* Resolve the IP address of local machine */ PJ_DEF(pj_status_t) pj_gethostip(int af, pj_sockaddr *addr) { unsigned i, count, cand_cnt; enum { CAND_CNT = 8, /* Weighting to be applied to found addresses */ WEIGHT_HOSTNAME = 1, /* hostname IP is not always valid! */ WEIGHT_DEF_ROUTE = 2, WEIGHT_INTERFACE = 1, WEIGHT_LOOPBACK = -5, WEIGHT_LINK_LOCAL = -4, WEIGHT_DISABLED = -50, MIN_WEIGHT = WEIGHT_DISABLED+1 /* minimum weight to use */ }; /* candidates: */ pj_sockaddr cand_addr[CAND_CNT]; int cand_weight[CAND_CNT]; int selected_cand; char strip[PJ_INET6_ADDRSTRLEN+10]; /* Special IPv4 addresses. */ struct spec_ipv4_t { pj_uint32_t addr; pj_uint32_t mask; int weight; } spec_ipv4[] = { /* 127.0.0.0/8, loopback addr will be used if there is no other * addresses. */ { 0x7f000000, 0xFF000000, WEIGHT_LOOPBACK }, /* 0.0.0.0/8, special IP that doesn't seem to be practically useful */ { 0x00000000, 0xFF000000, WEIGHT_DISABLED }, /* 169.254.0.0/16, a zeroconf/link-local address, which has higher * priority than loopback and will be used if there is no other * valid addresses. */ { 0xa9fe0000, 0xFFFF0000, WEIGHT_LINK_LOCAL } }; /* Special IPv6 addresses */ struct spec_ipv6_t { pj_uint8_t addr[16]; pj_uint8_t mask[16]; int weight; } spec_ipv6[] = { /* Loopback address, ::1/128 */ { {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1}, {0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff}, WEIGHT_LOOPBACK }, /* Link local, fe80::/10 */ { {0xfe,0x80,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, {0xff,0xc0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, WEIGHT_LINK_LOCAL }, /* Disabled, ::/128 */ { {0x0,0x0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, { 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff}, WEIGHT_DISABLED } }; pj_addrinfo ai; pj_status_t status; /* May not be used if TRACE_ is disabled */ PJ_UNUSED_ARG(strip); #ifdef _MSC_VER /* Get rid of "uninitialized he variable" with MS compilers */ pj_bzero(&ai, sizeof(ai)); #endif cand_cnt = 0; pj_bzero(cand_addr, sizeof(cand_addr)); pj_bzero(cand_weight, sizeof(cand_weight)); for (i=0; i<PJ_ARRAY_SIZE(cand_addr); ++i) { cand_addr[i].addr.sa_family = (pj_uint16_t)af; PJ_SOCKADDR_RESET_LEN(&cand_addr[i]); } addr->addr.sa_family = (pj_uint16_t)af; PJ_SOCKADDR_RESET_LEN(addr); #if !defined(PJ_GETHOSTIP_DISABLE_LOCAL_RESOLUTION) || \ PJ_GETHOSTIP_DISABLE_LOCAL_RESOLUTION == 0 /* Get hostname's IP address */ count = 1; status = pj_getaddrinfo(af, pj_gethostname(), &count, &ai); if (status == PJ_SUCCESS) { pj_assert(ai.ai_addr.addr.sa_family == (pj_uint16_t)af); pj_sockaddr_copy_addr(&cand_addr[cand_cnt], &ai.ai_addr); pj_sockaddr_set_port(&cand_addr[cand_cnt], 0); cand_weight[cand_cnt] += WEIGHT_HOSTNAME; ++cand_cnt; TRACE_((THIS_FILE, "hostname IP is %s", pj_sockaddr_print(&ai.ai_addr, strip, sizeof(strip), 0))); } #else PJ_UNUSED_ARG(ai); #endif /* Get default interface (interface for default route) */ if (cand_cnt < PJ_ARRAY_SIZE(cand_addr)) { status = pj_getdefaultipinterface(af, addr); if (status == PJ_SUCCESS) { TRACE_((THIS_FILE, "default IP is %s", pj_sockaddr_print(addr, strip, sizeof(strip), 0))); pj_sockaddr_set_port(addr, 0); for (i=0; i<cand_cnt; ++i) { if (pj_sockaddr_cmp(&cand_addr[i], addr)==0) break; } cand_weight[i] += WEIGHT_DEF_ROUTE; if (i >= cand_cnt) { pj_sockaddr_copy_addr(&cand_addr[i], addr); ++cand_cnt; } } } /* Enumerate IP interfaces */ if (cand_cnt < PJ_ARRAY_SIZE(cand_addr)) { unsigned start_if = cand_cnt; count = PJ_ARRAY_SIZE(cand_addr) - start_if; status = pj_enum_ip_interface(af, &count, &cand_addr[start_if]); if (status == 
PJ_SUCCESS && count) { /* Clear the port number */ for (i=0; i<count; ++i) pj_sockaddr_set_port(&cand_addr[start_if+i], 0); /* For each candidate that we found so far (that is the hostname * address and default interface address, check if they're found * in the interface list. If found, add the weight, and if not, * decrease the weight. */ for (i=0; i<cand_cnt; ++i) { unsigned j; for (j=0; j<count; ++j) { if (pj_sockaddr_cmp(&cand_addr[i], &cand_addr[start_if+j])==0) break; } if (j == count) { /* Not found */ cand_weight[i] -= WEIGHT_INTERFACE; } else { cand_weight[i] += WEIGHT_INTERFACE; } } /* Add remaining interface to candidate list. */ for (i=0; i<count; ++i) { unsigned j; for (j=0; j<cand_cnt; ++j) { if (pj_sockaddr_cmp(&cand_addr[start_if+i], &cand_addr[j])==0) break; } if (j == cand_cnt) { pj_sockaddr_copy_addr(&cand_addr[cand_cnt], &cand_addr[start_if+i]); cand_weight[cand_cnt] += WEIGHT_INTERFACE; ++cand_cnt; } } } } /* Apply weight adjustment for special IPv4/IPv6 addresses * See http://trac.pjsip.org/repos/ticket/1046 */ if (af == PJ_AF_INET) { for (i=0; i<cand_cnt; ++i) { unsigned j; for (j=0; j<PJ_ARRAY_SIZE(spec_ipv4); ++j) { pj_uint32_t a = pj_ntohl(cand_addr[i].ipv4.sin_addr.s_addr); pj_uint32_t pa = spec_ipv4[j].addr; pj_uint32_t pm = spec_ipv4[j].mask; if ((a & pm) == pa) { cand_weight[i] += spec_ipv4[j].weight; break; } } } } else if (af == PJ_AF_INET6) { for (i=0; i<PJ_ARRAY_SIZE(spec_ipv6); ++i) { unsigned j; for (j=0; j<cand_cnt; ++j) { pj_uint8_t *a = cand_addr[j].ipv6.sin6_addr.s6_addr; pj_uint8_t am[16]; pj_uint8_t *pa = spec_ipv6[i].addr; pj_uint8_t *pm = spec_ipv6[i].mask; unsigned k; for (k=0; k<16; ++k) { am[k] = (pj_uint8_t)((a[k] & pm[k]) & 0xFF); } if (pj_memcmp(am, pa, 16)==0) { cand_weight[j] += spec_ipv6[i].weight; } } } } else { return PJ_EAFNOTSUP; } /* Enumerate candidates to get the best IP address to choose */ selected_cand = -1; for (i=0; i<cand_cnt; ++i) { TRACE_((THIS_FILE, "Checking candidate IP %s, weight=%d", pj_sockaddr_print(&cand_addr[i], strip, sizeof(strip), 0), cand_weight[i])); if (cand_weight[i] < MIN_WEIGHT) { continue; } if (selected_cand == -1) selected_cand = i; else if (cand_weight[i] > cand_weight[selected_cand]) selected_cand = i; } /* If else fails, returns loopback interface as the last resort */ if (selected_cand == -1) { if (af==PJ_AF_INET) { addr->ipv4.sin_addr.s_addr = pj_htonl (0x7f000001); } else { pj_in6_addr *s6_addr; s6_addr = (pj_in6_addr*) pj_sockaddr_get_addr(addr); pj_bzero(s6_addr, sizeof(pj_in6_addr)); s6_addr->s6_addr[15] = 1; } TRACE_((THIS_FILE, "Loopback IP %s returned", pj_sockaddr_print(addr, strip, sizeof(strip), 0))); } else { pj_sockaddr_copy_addr(addr, &cand_addr[selected_cand]); TRACE_((THIS_FILE, "Candidate %s selected", pj_sockaddr_print(addr, strip, sizeof(strip), 0))); } return PJ_SUCCESS; }
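/*
 * Usage sketch (illustrative addition, not part of the library source):
 * resolve the "best" local IPv4 address as selected by the weighting
 * scheme above and log it.
 */
static void gethostip_sketch(void)
{
    pj_sockaddr addr;
    char buf[PJ_INET6_ADDRSTRLEN+10];

    if (pj_gethostip(pj_AF_INET(), &addr) == PJ_SUCCESS) {
        PJ_LOG(4, ("hostip", "Local IP: %s",
                   pj_sockaddr_print(&addr, buf, sizeof(buf), 0)));
    }
}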
/* * Callback upon request completion. */ static void on_request_complete(pj_stun_session *stun_sess, pj_status_t status, void *token, pj_stun_tx_data *tdata, const pj_stun_msg *response, const pj_sockaddr_t *src_addr, unsigned src_addr_len) { nat_detect_session *sess; pj_stun_sockaddr_attr *mattr = NULL; pj_stun_changed_addr_attr *ca = NULL; pj_uint32_t *tsx_id; int cmp; unsigned test_id; PJ_UNUSED_ARG(token); PJ_UNUSED_ARG(tdata); PJ_UNUSED_ARG(src_addr); PJ_UNUSED_ARG(src_addr_len); sess = (nat_detect_session*) pj_stun_session_get_user_data(stun_sess); pj_mutex_lock(sess->mutex); /* Find errors in the response */ if (status == PJ_SUCCESS) { /* Check error message */ if (PJ_STUN_IS_ERROR_RESPONSE(response->hdr.type)) { pj_stun_errcode_attr *eattr; int err_code; eattr = (pj_stun_errcode_attr*) pj_stun_msg_find_attr(response, PJ_STUN_ATTR_ERROR_CODE, 0); if (eattr != NULL) err_code = eattr->err_code; else err_code = PJ_STUN_SC_SERVER_ERROR; status = PJ_STATUS_FROM_STUN_CODE(err_code); } else { /* Get MAPPED-ADDRESS or XOR-MAPPED-ADDRESS */ mattr = (pj_stun_sockaddr_attr*) pj_stun_msg_find_attr(response, PJ_STUN_ATTR_XOR_MAPPED_ADDR, 0); if (mattr == NULL) { mattr = (pj_stun_sockaddr_attr*) pj_stun_msg_find_attr(response, PJ_STUN_ATTR_MAPPED_ADDR, 0); } if (mattr == NULL) { status = PJNATH_ESTUNNOMAPPEDADDR; } /* Get CHANGED-ADDRESS attribute */ ca = (pj_stun_changed_addr_attr*) pj_stun_msg_find_attr(response, PJ_STUN_ATTR_CHANGED_ADDR, 0); if (ca == NULL) { status = PJ_STATUS_FROM_STUN_CODE(PJ_STUN_SC_SERVER_ERROR); } } } /* Save the result */ tsx_id = (pj_uint32_t*) tdata->msg->hdr.tsx_id; test_id = tsx_id[2]; if (test_id >= ST_MAX) { PJ_LOG(4,(sess->pool->obj_name, "Invalid transaction ID %u in response", test_id)); end_session(sess, PJ_STATUS_FROM_STUN_CODE(PJ_STUN_SC_SERVER_ERROR), PJ_STUN_NAT_TYPE_ERR_UNKNOWN); goto on_return; } PJ_LOG(5,(sess->pool->obj_name, "Completed %s, status=%d", test_names[test_id], status)); sess->result[test_id].complete = PJ_TRUE; sess->result[test_id].status = status; if (status == PJ_SUCCESS) { pj_memcpy(&sess->result[test_id].ma, &mattr->sockaddr.ipv4, sizeof(pj_sockaddr_in)); pj_memcpy(&sess->result[test_id].ca, &ca->sockaddr.ipv4, sizeof(pj_sockaddr_in)); } /* Send Test 1B only when Test 2 completes. Must not send Test 1B * before Test 2 completes to avoid creating mapping on the NAT. */ if (!sess->result[ST_TEST_1B].executed && sess->result[ST_TEST_2].complete && sess->result[ST_TEST_2].status != PJ_SUCCESS && sess->result[ST_TEST_1].complete && sess->result[ST_TEST_1].status == PJ_SUCCESS) { cmp = pj_memcmp(&sess->local_addr, &sess->result[ST_TEST_1].ma, sizeof(pj_sockaddr_in)); if (cmp != 0) send_test(sess, ST_TEST_1B, &sess->result[ST_TEST_1].ca, 0); } if (test_completed(sess)<3 || test_completed(sess)!=test_executed(sess)) goto on_return; /* Handle the test result according to RFC 3489 page 22: +--------+ | Test | | 1 | +--------+ | | V /\ /\ N / \ Y / \ Y +--------+ UDP <-------/Resp\--------->/ IP \------------->| Test | Blocked \ ? / \Same/ | 2 | \ / \? / +--------+ \/ \/ | | N | | V V /\ +--------+ Sym. N / \ | Test | UDP <---/Resp\ | 2 | Firewall \ ? / +--------+ \ / | \/ V |Y /\ /\ | Symmetric N / \ +--------+ N / \ V NAT <--- / IP \<-----| Test |<--- /Resp\ Open \Same/ | 1B | \ ? / Internet \? / +--------+ \ / \/ \/ | |Y | | | V | Full | Cone V /\ +--------+ / \ Y | Test |------>/Resp\---->Restricted | 3 | \ ? 
/ +--------+ \ / \/ |N | Port +------>Restricted Figure 2: Flow for type discovery process */ switch (sess->result[ST_TEST_1].status) { case PJNATH_ESTUNTIMEDOUT: /* * Test 1 has timed-out. Conclude with NAT_TYPE_BLOCKED. */ end_session(sess, PJ_SUCCESS, PJ_STUN_NAT_TYPE_BLOCKED); break; case PJ_SUCCESS: /* * Test 1 is successful. Further tests are needed to detect * NAT type. Compare the MAPPED-ADDRESS with the local address. */ cmp = pj_memcmp(&sess->local_addr, &sess->result[ST_TEST_1].ma, sizeof(pj_sockaddr_in)); if (cmp==0) { /* * MAPPED-ADDRESS and local address is equal. Need one more * test to determine NAT type. */ switch (sess->result[ST_TEST_2].status) { case PJ_SUCCESS: /* * Test 2 is also successful. We're in the open. */ end_session(sess, PJ_SUCCESS, PJ_STUN_NAT_TYPE_OPEN); break; case PJNATH_ESTUNTIMEDOUT: /* * Test 2 has timed out. We're behind somekind of UDP * firewall. */ end_session(sess, PJ_SUCCESS, PJ_STUN_NAT_TYPE_SYMMETRIC_UDP); break; default: /* * We've got other error with Test 2. */ end_session(sess, sess->result[ST_TEST_2].status, PJ_STUN_NAT_TYPE_ERR_UNKNOWN); break; } } else { /* * MAPPED-ADDRESS is different than local address. * We're behind NAT. */ switch (sess->result[ST_TEST_2].status) { case PJ_SUCCESS: /* * Test 2 is successful. We're behind a full-cone NAT. */ end_session(sess, PJ_SUCCESS, PJ_STUN_NAT_TYPE_FULL_CONE); break; case PJNATH_ESTUNTIMEDOUT: /* * Test 2 has timed-out Check result of test 1B.. */ switch (sess->result[ST_TEST_1B].status) { case PJ_SUCCESS: /* * Compare the MAPPED-ADDRESS of test 1B with the * MAPPED-ADDRESS returned in test 1.. */ cmp = pj_memcmp(&sess->result[ST_TEST_1].ma, &sess->result[ST_TEST_1B].ma, sizeof(pj_sockaddr_in)); if (cmp != 0) { /* * MAPPED-ADDRESS is different, we're behind a * symmetric NAT. */ end_session(sess, PJ_SUCCESS, PJ_STUN_NAT_TYPE_SYMMETRIC); } else { /* * MAPPED-ADDRESS is equal. We're behind a restricted * or port-restricted NAT, depending on the result of * test 3. */ switch (sess->result[ST_TEST_3].status) { case PJ_SUCCESS: /* * Test 3 is successful, we're behind a restricted * NAT. */ end_session(sess, PJ_SUCCESS, PJ_STUN_NAT_TYPE_RESTRICTED); break; case PJNATH_ESTUNTIMEDOUT: /* * Test 3 failed, we're behind a port restricted * NAT. */ end_session(sess, PJ_SUCCESS, PJ_STUN_NAT_TYPE_PORT_RESTRICTED); break; default: /* * Got other error with test 3. */ end_session(sess, sess->result[ST_TEST_3].status, PJ_STUN_NAT_TYPE_ERR_UNKNOWN); break; } } break; case PJNATH_ESTUNTIMEDOUT: /* * Strangely test 1B has failed. Maybe connectivity was * lost? Or perhaps port 3489 (the usual port number in * CHANGED-ADDRESS) is blocked? */ switch (sess->result[ST_TEST_3].status) { case PJ_SUCCESS: /* Although test 1B failed, test 3 was successful. * It could be that port 3489 is blocked, while the * NAT itself looks to be a Restricted one. */ end_session(sess, PJ_SUCCESS, PJ_STUN_NAT_TYPE_RESTRICTED); break; default: /* Can't distinguish between Symmetric and Port * Restricted, so set the type to Unknown */ end_session(sess, PJ_SUCCESS, PJ_STUN_NAT_TYPE_ERR_UNKNOWN); break; } break; default: /* * Got other error with test 1B. */ end_session(sess, sess->result[ST_TEST_1B].status, PJ_STUN_NAT_TYPE_ERR_UNKNOWN); break; } break; default: /* * We've got other error with Test 2. */ end_session(sess, sess->result[ST_TEST_2].status, PJ_STUN_NAT_TYPE_ERR_UNKNOWN); break; } } break; default: /* * We've got other error with Test 1. 
*/ end_session(sess, sess->result[ST_TEST_1].status, PJ_STUN_NAT_TYPE_ERR_UNKNOWN); break; } on_return: pj_mutex_unlock(sess->mutex); }
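/*
 * Usage sketch (illustrative addition, not part of the library source):
 * start classic STUN NAT type detection, whose responses are classified by
 * on_request_complete() above. This assumes the pj_stun_detect_nat_type()
 * API and callback declared in <pjnath/nat_detect.h>; the STUN server
 * address and pj_stun_config are assumed to be prepared by the caller.
 */
static void nat_detect_done(void *user_data,
                            const pj_stun_nat_detect_result *res)
{
    PJ_UNUSED_ARG(user_data);
    if (res->status == PJ_SUCCESS) {
        PJ_LOG(4, ("natdetect", "NAT type is %s", res->nat_type_name));
    } else {
        PJ_LOG(4, ("natdetect", "NAT detection failed: %s",
                   res->status_text));
    }
}

static pj_status_t nat_detect_sketch(const pj_sockaddr_in *stun_srv,
                                     pj_stun_config *stun_cfg)
{
    return pj_stun_detect_nat_type(stun_srv, stun_cfg, NULL,
                                   &nat_detect_done);
}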
/* * This callback is called by active socket when pending accept() operation * has completed. */ static pj_bool_t on_accept_complete(pj_activesock_t *asock, pj_sock_t sock, const pj_sockaddr_t *src_addr, int src_addr_len) { struct tcp_listener *listener; struct tcp_transport *tcp; char addr[PJ_INET6_ADDRSTRLEN+10]; pjsip_tp_state_callback state_cb; pj_sockaddr tmp_src_addr; pj_status_t status; PJ_UNUSED_ARG(src_addr_len); listener = (struct tcp_listener*) pj_activesock_get_user_data(asock); PJ_ASSERT_RETURN(sock != PJ_INVALID_SOCKET, PJ_TRUE); PJ_LOG(4,(listener->factory.obj_name, "TCP listener %.*s:%d: got incoming TCP connection " "from %s, sock=%d", (int)listener->factory.addr_name.host.slen, listener->factory.addr_name.host.ptr, listener->factory.addr_name.port, pj_sockaddr_print(src_addr, addr, sizeof(addr), 3), sock)); /* Apply QoS, if specified */ status = pj_sock_apply_qos2(sock, listener->qos_type, &listener->qos_params, 2, listener->factory.obj_name, "incoming SIP TCP socket"); /* tcp_create() expect pj_sockaddr, so copy src_addr to temporary var, * just in case. */ pj_bzero(&tmp_src_addr, sizeof(tmp_src_addr)); pj_sockaddr_cp(&tmp_src_addr, src_addr); /* * Incoming connection! * Create TCP transport for the new socket. */ status = tcp_create( listener, NULL, sock, PJ_TRUE, &listener->factory.local_addr, &tmp_src_addr, &tcp); if (status == PJ_SUCCESS) { status = tcp_start_read(tcp); if (status != PJ_SUCCESS) { PJ_LOG(3,(tcp->base.obj_name, "New transport cancelled")); tcp_destroy(&tcp->base, status); } else { /* Start keep-alive timer */ if (PJSIP_TCP_KEEP_ALIVE_INTERVAL) { pj_time_val delay = {PJSIP_TCP_KEEP_ALIVE_INTERVAL, 0}; pjsip_endpt_schedule_timer(listener->endpt, &tcp->ka_timer, &delay); tcp->ka_timer.id = PJ_TRUE; pj_gettimeofday(&tcp->last_activity); } /* Notify application of transport state accepted */ state_cb = pjsip_tpmgr_get_state_cb(tcp->base.tpmgr); if (state_cb) { pjsip_transport_state_info state_info; pj_bzero(&state_info, sizeof(state_info)); (*state_cb)(&tcp->base, PJSIP_TP_STATE_CONNECTED, &state_info); } } } return PJ_TRUE; }
static void ioqueue_on_accept_complete(pj_ioqueue_key_t *key, pj_ioqueue_op_key_t *op_key, pj_sock_t new_sock, pj_status_t status) { pj_activesock_t *asock = (pj_activesock_t*) pj_ioqueue_get_user_data(key); struct accept_op *accept_op = (struct accept_op*) op_key; PJ_UNUSED_ARG(new_sock); /* Ignore if we've been shutdown */ if (asock->shutdown) return; do { if (status == asock->last_err && status != PJ_SUCCESS) { asock->err_counter++; if (asock->err_counter >= PJ_ACTIVESOCK_MAX_CONSECUTIVE_ACCEPT_ERROR) { PJ_LOG(3, ("", "Received %d consecutive errors: %d for the accept()" " operation, stopping further ioqueue accepts.", asock->err_counter, asock->last_err)); if ((status == PJ_STATUS_FROM_OS(OSERR_EWOULDBLOCK)) && (asock->cb.on_accept_complete2)) { (*asock->cb.on_accept_complete2)(asock, accept_op->new_sock, &accept_op->rem_addr, accept_op->rem_addr_len, PJ_ESOCKETSTOP); } return; } } else { asock->err_counter = 0; asock->last_err = status; } if (status==PJ_SUCCESS && (asock->cb.on_accept_complete2 || asock->cb.on_accept_complete)) { pj_bool_t ret; /* Notify callback */ if (asock->cb.on_accept_complete2) { ret = (*asock->cb.on_accept_complete2)(asock, accept_op->new_sock, &accept_op->rem_addr, accept_op->rem_addr_len, status); } else { ret = (*asock->cb.on_accept_complete)(asock, accept_op->new_sock, &accept_op->rem_addr, accept_op->rem_addr_len); } /* If callback returns false, we have been destroyed! */ if (!ret) return; #if defined(PJ_IPHONE_OS_HAS_MULTITASKING_SUPPORT) && \ PJ_IPHONE_OS_HAS_MULTITASKING_SUPPORT!=0 activesock_create_iphone_os_stream(asock); #endif } else if (status==PJ_SUCCESS) { /* Application doesn't handle the new socket, we need to * close it to avoid resource leak. */ pj_sock_close(accept_op->new_sock); } /* Don't start another accept() if we've been shutdown */ if (asock->shutdown) return; /* Prepare next accept() */ accept_op->new_sock = PJ_INVALID_SOCKET; accept_op->rem_addr_len = sizeof(accept_op->rem_addr); status = pj_ioqueue_accept(asock->key, op_key, &accept_op->new_sock, NULL, &accept_op->rem_addr, &accept_op->rem_addr_len); } while (status != PJ_EPENDING && status != PJ_ECANCELLED); }
/* * Set the thread priority. */ PJ_DEF(pj_status_t) pj_thread_set_prio(pj_thread_t *thread, int prio) { PJ_UNUSED_ARG(thread); PJ_UNUSED_ARG(prio); return PJ_SUCCESS; }
/* * This is the main function for performing server resolution. */ PJ_DEF(void) pjsip_resolve( pjsip_resolver_t *resolver, pj_pool_t *pool, const pjsip_host_info *target, void *token, pjsip_resolver_callback *cb) { pjsip_server_addresses svr_addr; pj_status_t status = PJ_SUCCESS; int ip_addr_ver; struct query *query; pjsip_transport_type_e type = target->type; int af = pj_AF_UNSPEC(); /* If an external implementation has been provided use it instead */ if (resolver->ext_res) { (*resolver->ext_res->resolve)(resolver, pool, target, token, cb); return; } /* Is it IP address or hostname? And if it's an IP, which version? */ ip_addr_ver = get_ip_addr_ver(&target->addr.host); /* Initialize address family type. Unfortunately, target type doesn't * really tell the address family type, except when IPv6 flag is * explicitly set. */ #if defined(PJ_HAS_IPV6) && PJ_HAS_IPV6==1 if ((ip_addr_ver == 6) || (type & PJSIP_TRANSPORT_IPV6)) af = pj_AF_INET6(); else if (ip_addr_ver == 4) af = pj_AF_INET(); #else /* IPv6 is disabled, will resolving IPv6 address be useful? */ af = pj_AF_INET(); #endif /* Set the transport type if not explicitly specified. * RFC 3263 section 4.1 specify rules to set up this. */ if (type == PJSIP_TRANSPORT_UNSPECIFIED) { if (ip_addr_ver || (target->addr.port != 0)) { #if PJ_HAS_TCP if (target->flag & PJSIP_TRANSPORT_SECURE) { type = PJSIP_TRANSPORT_TLS; } else if (target->flag & PJSIP_TRANSPORT_RELIABLE) { type = PJSIP_TRANSPORT_TCP; } else #endif { type = PJSIP_TRANSPORT_UDP; } } else { /* No type or explicit port is specified, and the address is * not IP address. * In this case, full NAPTR resolution must be performed. * But we don't support it (yet). */ #if PJ_HAS_TCP if (target->flag & PJSIP_TRANSPORT_SECURE) { type = PJSIP_TRANSPORT_TLS; } else if (target->flag & PJSIP_TRANSPORT_RELIABLE) { type = PJSIP_TRANSPORT_TCP; } else #endif { type = PJSIP_TRANSPORT_UDP; } } } /* If target is an IP address, or if resolver is not configured, * we can just finish the resolution now using pj_gethostbyname() */ if (ip_addr_ver || resolver->res == NULL) { char addr_str[PJ_INET6_ADDRSTRLEN+10]; pj_uint16_t srv_port; if (ip_addr_ver != 0) { /* Target is an IP address, no need to resolve */ if (ip_addr_ver == 4) { pj_sockaddr_init(pj_AF_INET(), &svr_addr.entry[0].addr, NULL, 0); pj_inet_pton(pj_AF_INET(), &target->addr.host, &svr_addr.entry[0].addr.ipv4.sin_addr); } else { pj_sockaddr_init(pj_AF_INET6(), &svr_addr.entry[0].addr, NULL, 0); pj_inet_pton(pj_AF_INET6(), &target->addr.host, &svr_addr.entry[0].addr.ipv6.sin6_addr); } } else { pj_addrinfo ai; unsigned count; PJ_LOG(5,(THIS_FILE, "DNS resolver not available, target '%.*s:%d' type=%s " "will be resolved with getaddrinfo()", target->addr.host.slen, target->addr.host.ptr, target->addr.port, pjsip_transport_get_type_name(target->type))); /* Resolve */ count = 1; status = pj_getaddrinfo(af, &target->addr.host, &count, &ai); if (status != PJ_SUCCESS) { /* "Normalize" error to PJ_ERESOLVE. This is a special error * because it will be translated to SIP status 502 by * sip_transaction.c */ status = PJ_ERESOLVE; goto on_error; } pj_sockaddr_cp(&svr_addr.entry[0].addr, &ai.ai_addr); if (af == pj_AF_UNSPEC()) af = ai.ai_addr.addr.sa_family; } /* After address resolution, update IPv6 bitflag in transport type. 
*/ if (af == pj_AF_INET6()) type |= PJSIP_TRANSPORT_IPV6; /* Set the port number */ if (target->addr.port == 0) { srv_port = (pj_uint16_t) pjsip_transport_get_default_port_for_type(type); } else { srv_port = (pj_uint16_t)target->addr.port; } pj_sockaddr_set_port(&svr_addr.entry[0].addr, srv_port); /* Call the callback. */ PJ_LOG(5,(THIS_FILE, "Target '%.*s:%d' type=%s resolved to " "'%s' type=%s (%s)", (int)target->addr.host.slen, target->addr.host.ptr, target->addr.port, pjsip_transport_get_type_name(target->type), pj_sockaddr_print(&svr_addr.entry[0].addr, addr_str, sizeof(addr_str), 3), pjsip_transport_get_type_name(type), pjsip_transport_get_type_desc(type))); svr_addr.count = 1; svr_addr.entry[0].priority = 0; svr_addr.entry[0].weight = 0; svr_addr.entry[0].type = type; svr_addr.entry[0].addr_len = pj_sockaddr_get_len(&svr_addr.entry[0].addr); (*cb)(status, token, &svr_addr); /* Done. */ return; } /* Target is not an IP address so we need to resolve it. */ #if PJSIP_HAS_RESOLVER /* Build the query state */ query = PJ_POOL_ZALLOC_T(pool, struct query); query->objname = THIS_FILE; query->token = token; query->cb = cb; query->req.target = *target; pj_strdup(pool, &query->req.target.addr.host, &target->addr.host); /* If port is not specified, start with SRV resolution * (should be with NAPTR, but we'll do that later) */ PJ_TODO(SUPPORT_DNS_NAPTR); /* Build dummy NAPTR entry */ query->naptr_cnt = 1; pj_bzero(&query->naptr[0], sizeof(query->naptr[0])); query->naptr[0].order = 0; query->naptr[0].pref = 0; query->naptr[0].type = type; pj_strdup(pool, &query->naptr[0].name, &target->addr.host); /* Start DNS SRV or A resolution, depending on whether port is specified */ if (target->addr.port == 0) { query->query_type = PJ_DNS_TYPE_SRV; query->req.def_port = 5060; if (type == PJSIP_TRANSPORT_TLS || type == PJSIP_TRANSPORT_TLS6) { query->naptr[0].res_type = pj_str("_sips._tcp."); query->req.def_port = 5061; } else if (type == PJSIP_TRANSPORT_TCP || type == PJSIP_TRANSPORT_TCP6) query->naptr[0].res_type = pj_str("_sip._tcp."); else if (type == PJSIP_TRANSPORT_UDP || type == PJSIP_TRANSPORT_UDP6) query->naptr[0].res_type = pj_str("_sip._udp."); else { pj_assert(!"Unknown transport type"); query->naptr[0].res_type = pj_str("_sip._udp."); } } else { /* Otherwise if port is specified, start with A (or AAAA) host * resolution */ query->query_type = PJ_DNS_TYPE_A; query->naptr[0].res_type.slen = 0; query->req.def_port = target->addr.port; } /* Start the asynchronous query */ PJ_LOG(5, (query->objname, "Starting async DNS %s query: target=%.*s%.*s, transport=%s, " "port=%d", pj_dns_get_type_name(query->query_type), (int)query->naptr[0].res_type.slen, query->naptr[0].res_type.ptr, (int)query->naptr[0].name.slen, query->naptr[0].name.ptr, pjsip_transport_get_type_name(target->type), target->addr.port)); if (query->query_type == PJ_DNS_TYPE_SRV) { int opt = 0; if (af == pj_AF_UNSPEC()) opt = PJ_DNS_SRV_FALLBACK_A | PJ_DNS_SRV_FALLBACK_AAAA | PJ_DNS_SRV_RESOLVE_AAAA; else if (af == pj_AF_INET6()) opt = PJ_DNS_SRV_FALLBACK_AAAA | PJ_DNS_SRV_RESOLVE_AAAA_ONLY; else /* af == pj_AF_INET() */ opt = PJ_DNS_SRV_FALLBACK_A; status = pj_dns_srv_resolve(&query->naptr[0].name, &query->naptr[0].res_type, query->req.def_port, pool, resolver->res, opt, query, &srv_resolver_cb, NULL); } else if (query->query_type == PJ_DNS_TYPE_A) { /* Resolve DNS A record if address family is not fixed to IPv6 */ if (af != pj_AF_INET6()) { /* If there will be DNS AAAA query too, let's setup a dummy one * here, otherwise app callback may 
be called immediately (before * DNS AAAA query is sent) when DNS A record is available in the * cache. */ if (af == pj_AF_UNSPEC()) query->object6 = (pj_dns_async_query*)0x1; status = pj_dns_resolver_start_query(resolver->res, &query->naptr[0].name, PJ_DNS_TYPE_A, 0, &dns_a_callback, query, &query->object); } /* Resolve DNS AAAA record if address family is not fixed to IPv4 */ if (af != pj_AF_INET() && status == PJ_SUCCESS) { status = pj_dns_resolver_start_query(resolver->res, &query->naptr[0].name, PJ_DNS_TYPE_AAAA, 0, &dns_aaaa_callback, query, &query->object6); } } else { pj_assert(!"Unexpected"); status = PJ_EBUG; } if (status != PJ_SUCCESS) goto on_error; return; #else /* PJSIP_HAS_RESOLVER */ PJ_UNUSED_ARG(pool); PJ_UNUSED_ARG(query); PJ_UNUSED_ARG(srv_name); #endif /* PJSIP_HAS_RESOLVER */ on_error: if (status != PJ_SUCCESS) { char errmsg[PJ_ERR_MSG_SIZE]; PJ_LOG(4,(THIS_FILE, "Failed to resolve '%.*s'. Err=%d (%s)", (int)target->addr.host.slen, target->addr.host.ptr, status, pj_strerror(status,errmsg,sizeof(errmsg)).ptr)); (*cb)(status, token, NULL); return; } }
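/*
 * Usage sketch (illustrative addition, not part of the library source):
 * resolve a SIP destination with pjsip_resolve() above. The resolver
 * instance is assumed to be available to the caller (e.g. obtained from the
 * endpoint); host name and callback are illustrative only.
 */
static void on_resolved_sketch(pj_status_t status, void *token,
                               const struct pjsip_server_addresses *addr)
{
    PJ_UNUSED_ARG(token);
    if (status == PJ_SUCCESS && addr->count > 0) {
        PJ_LOG(4, ("resolve", "Resolved to %d server address(es)",
                   (int)addr->count));
    }
}

static void resolve_sketch(pjsip_resolver_t *resolver, pj_pool_t *pool)
{
    pjsip_host_info dest;

    pj_bzero(&dest, sizeof(dest));
    dest.type = PJSIP_TRANSPORT_UNSPECIFIED;
    dest.flag = 0;
    dest.addr.host = pj_str("sip.example.com");
    dest.addr.port = 0;     /* 0: follow RFC 3263 SRV/A resolution */

    pjsip_resolve(resolver, pool, &dest, NULL, &on_resolved_sketch);
}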
/* * pj_thread_get_os_handle() */ PJ_DEF(void*) pj_thread_get_os_handle(pj_thread_t *thread) { PJ_UNUSED_ARG(thread); return NULL; }
/* * Destroy. */ PJ_DEF(pj_status_t) echo_supp_destroy(void *state) { PJ_UNUSED_ARG(state); return PJ_SUCCESS; }
/* * pj_thread_destroy() */ PJ_DEF(pj_status_t) pj_thread_destroy(pj_thread_t *rec) { PJ_UNUSED_ARG(rec); return PJ_EINVALIDOP; }
/* Get pool name */ PJ_DEF(const char*) pj_pool_getobjname_imp(pj_pool_t *pool) { PJ_UNUSED_ARG(pool); return "pooldbg"; }
/* * Get the endpoint where this UA is currently registered. */ PJ_DEF(pjsip_endpoint*) pjsip_ua_get_endpt(pjsip_user_agent *ua) { PJ_UNUSED_ARG(ua); pj_assert(ua == &mod_ua.mod); return mod_ua.endpt; }
/* API: refresh the list of devices */ static pj_status_t opengl_factory_refresh(pjmedia_vid_dev_factory *f) { PJ_UNUSED_ARG(f); return PJ_SUCCESS; }
/* * Poll the I/O Completion Port, execute callback, * and return the key and bytes transfered of the last operation. */ static pj_bool_t poll_iocp( HANDLE hIocp, DWORD dwTimeout, pj_ssize_t *p_bytes, pj_ioqueue_key_t **p_key ) { DWORD dwBytesTransfered, dwKey; generic_overlapped *pOv; pj_ioqueue_key_t *key; pj_ssize_t size_status = -1; BOOL rcGetQueued; /* Poll for completion status. */ rcGetQueued = GetQueuedCompletionStatus(hIocp, &dwBytesTransfered, &dwKey, (OVERLAPPED**)&pOv, dwTimeout); /* The return value is: * - nonzero if event was dequeued. * - zero and pOv==NULL if no event was dequeued. * - zero and pOv!=NULL if event for failed I/O was dequeued. */ if (pOv) { pj_bool_t has_lock; /* Event was dequeued for either successfull or failed I/O */ key = (pj_ioqueue_key_t*)dwKey; size_status = dwBytesTransfered; /* Report to caller regardless */ if (p_bytes) *p_bytes = size_status; if (p_key) *p_key = key; #if PJ_IOQUEUE_HAS_SAFE_UNREG /* We shouldn't call callbacks if key is quitting. */ if (key->closing) return PJ_TRUE; /* If concurrency is disabled, lock the key * (and save the lock status to local var since app may change * concurrency setting while in the callback) */ if (key->allow_concurrent == PJ_FALSE) { pj_mutex_lock(key->mutex); has_lock = PJ_TRUE; } else { has_lock = PJ_FALSE; } /* Now that we get the lock, check again that key is not closing */ if (key->closing) { if (has_lock) { pj_mutex_unlock(key->mutex); } return PJ_TRUE; } /* Increment reference counter to prevent this key from being * deleted */ pj_atomic_inc(key->ref_count); #else PJ_UNUSED_ARG(has_lock); #endif /* Carry out the callback */ switch (pOv->operation) { case PJ_IOQUEUE_OP_READ: case PJ_IOQUEUE_OP_RECV: case PJ_IOQUEUE_OP_RECV_FROM: pOv->operation = 0; if (key->cb.on_read_complete) key->cb.on_read_complete(key, (pj_ioqueue_op_key_t*)pOv, size_status); break; case PJ_IOQUEUE_OP_WRITE: case PJ_IOQUEUE_OP_SEND: case PJ_IOQUEUE_OP_SEND_TO: pOv->operation = 0; if (key->cb.on_write_complete) key->cb.on_write_complete(key, (pj_ioqueue_op_key_t*)pOv, size_status); break; #if PJ_HAS_TCP case PJ_IOQUEUE_OP_ACCEPT: /* special case for accept. */ ioqueue_on_accept_complete(key, (ioqueue_accept_rec*)pOv); if (key->cb.on_accept_complete) { ioqueue_accept_rec *accept_rec = (ioqueue_accept_rec*)pOv; pj_status_t status = PJ_SUCCESS; pj_sock_t newsock; newsock = accept_rec->newsock; accept_rec->newsock = PJ_INVALID_SOCKET; if (newsock == PJ_INVALID_SOCKET) { int dwError = WSAGetLastError(); if (dwError == 0) dwError = OSERR_ENOTCONN; status = PJ_RETURN_OS_ERROR(dwError); } key->cb.on_accept_complete(key, (pj_ioqueue_op_key_t*)pOv, newsock, status); } break; case PJ_IOQUEUE_OP_CONNECT: #endif case PJ_IOQUEUE_OP_NONE: pj_assert(0); break; } #if PJ_IOQUEUE_HAS_SAFE_UNREG decrement_counter(key); if (has_lock) pj_mutex_unlock(key->mutex); #endif return PJ_TRUE; } /* No event was queued. */ return PJ_FALSE; }
static pjsip_uri *create_dummy(pj_pool_t *pool) { PJ_UNUSED_ARG(pool); return NULL; }
static void log_writer_cb(int level, const char *data, int len) { PJ_UNUSED_ARG(level); fwrite(data, len, 1, fLog); }
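/*
 * Usage sketch (illustrative addition, not part of the original source):
 * install log_writer_cb() above as the pjlib log writer. fLog is the FILE*
 * used by the callback and is assumed to be opened before any logging
 * takes place; the path and log level are illustrative only.
 */
static pj_status_t install_log_writer(const char *path)
{
    fLog = fopen(path, "a");
    if (fLog == NULL)
        return PJ_ENOTFOUND;    /* could not open the log file */

    pj_log_set_level(4);
    pj_log_set_log_func(&log_writer_cb);
    return PJ_SUCCESS;
}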
/* Normally we should throw exception when memory alloc fails. * Here we do nothing so that the flow will go back to original caller, * which will test the result using NULL comparison. Normally caller will * catch the exception instead of checking for NULLs. */ static void null_callback(pj_pool_t *pool, pj_size_t size) { PJ_UNUSED_ARG(pool); PJ_UNUSED_ARG(size); }