/* Remove oldest frames as many as param 'count'.
 *
 * The frame list is a circular buffer, so the removal may touch two
 * contiguous regions: from 'head' to the end of the buffer (step1), and
 * then from index 0 (step2) when the range wraps around.
 *
 * Returns the number of frames actually removed (clamped to current size).
 */
static unsigned jb_framelist_remove_head(jb_framelist_t *framelist, unsigned count)
{
    /* Never remove more frames than the list currently holds. */
    if (count > framelist->size)
        count = framelist->size;

    if (count) {
        /* may be done in two steps if overlapping */
        unsigned step1,step2;
        unsigned tmp = framelist->head+count;
        unsigned i;

        /* Split the removal range at the physical end of the buffer. */
        if (tmp > framelist->max_count) {
            step1 = framelist->max_count - framelist->head;
            step2 = count-step1;
        } else {
            step1 = count;
            step2 = 0;
        }

        /* Keep the discarded-frame counter in sync with the slots
         * being reclaimed (first, non-wrapping region).
         */
        for (i = framelist->head; i < (framelist->head + step1); ++i) {
            if (framelist->frame_type[i] == PJMEDIA_JB_DISCARDED_FRAME) {
                pj_assert(framelist->discarded_num > 0);
                framelist->discarded_num--;
            }
        }

        //pj_bzero(framelist->content +
        //         framelist->head * framelist->frame_size,
        //         step1*framelist->frame_size);
        pj_memset(framelist->frame_type+framelist->head,
                  PJMEDIA_JB_MISSING_FRAME,
                  step1*sizeof(framelist->frame_type[0]));
        pj_bzero(framelist->content_len+framelist->head,
                 step1*sizeof(framelist->content_len[0]));

        if (step2) {
            /* Wrapped region: same bookkeeping starting at index 0. */
            for (i = 0; i < step2; ++i) {
                if (framelist->frame_type[i] == PJMEDIA_JB_DISCARDED_FRAME) {
                    pj_assert(framelist->discarded_num > 0);
                    framelist->discarded_num--;
                }
            }
            //pj_bzero( framelist->content,
            //          step2*framelist->frame_size);
            pj_memset(framelist->frame_type,
                      PJMEDIA_JB_MISSING_FRAME,
                      step2*sizeof(framelist->frame_type[0]));
            pj_bzero (framelist->content_len,
                      step2*sizeof(framelist->content_len[0]));
        }

        /* update states */
        framelist->origin += count;
        framelist->head = (framelist->head + count) % framelist->max_count;
        framelist->size -= count;
    }

    return count;
}
/*
 * Serialize a JSON element tree by streaming the output through the
 * caller-supplied writer callback rather than into a fixed buffer.
 * Returns PJ_EINVAL when either the element or the writer is NULL,
 * otherwise whatever elem_write() reports.
 */
PJ_DEF(pj_status_t) pj_json_writef( const pj_json_elem *elem, pj_json_writer writer, void *user_data)
{
    struct write_state state;

    PJ_ASSERT_RETURN(elem && writer, PJ_EINVAL);

    /* Pre-fill the indentation scratch buffers with spaces. */
    pj_memset(state.indent_buf, ' ', MAX_INDENT);
    pj_memset(state.space, ' ', PJ_JSON_NAME_MIN_LEN);

    state.writer = writer;
    state.user_data = user_data;
    state.indent = 0;

    return elem_write(elem, &state, 0);
}
// // Default constructor. // Pj_Event_Handler() : key_(NULL) { pj_memset(&timer_, 0, sizeof(timer_)); timer_.user_data = this; timer_.cb = &timer_callback; }
/* Return the frame list to its pristine, empty state: reset all
 * bookkeeping counters, mark every slot as a missing frame, and clear
 * the per-slot content lengths.  Always succeeds.
 */
static pj_status_t jb_framelist_reset(jb_framelist_t *framelist)
{
    /* Reset the bookkeeping counters. */
    framelist->discarded_num = 0;
    framelist->size = 0;
    framelist->origin = INVALID_OFFSET;
    framelist->head = 0;

    /* Mark every slot as missing and wipe the stored lengths. */
    pj_memset(framelist->frame_type,
              PJMEDIA_JB_MISSING_FRAME,
              sizeof(framelist->frame_type[0]) * framelist->max_count);
    pj_bzero(framelist->content_len,
             sizeof(framelist->content_len[0]) * framelist->max_count);

    return PJ_SUCCESS;
}
/*
 * Register an externally-created thread with PJLIB, storing its
 * descriptor in thread-local storage.  If this thread already has a
 * registered descriptor, that one is returned unchanged.
 *
 * cstr_thread_name may be NULL; when given, it is deliberately used as
 * a printf-style format (it may contain e.g. "%d" to embed the thread
 * id in the name).
 */
PJ_DEF(pj_thread_t*) pj_thread_register (const char *cstr_thread_name, pj_thread_desc desc)
{
    pj_thread_t *thread = (pj_thread_t *)desc;
    pj_str_t thread_name = pj_str((char*)cstr_thread_name);

    /* Size sanity check. */
    if (sizeof(pj_thread_desc) < sizeof(pj_thread_t)) {
        pj_assert(!"Not enough pj_thread_desc size!");
        return NULL;
    }

    /* If a thread descriptor has been registered before, just return it. */
    if (pj_thread_local_get (thread_tls_id) != 0) {
        return (pj_thread_t*)pj_thread_local_get (thread_tls_id);
    }

    /* Initialize and set the thread entry. */
    pj_memset(desc, 0, sizeof(pj_thread_desc));
    thread->hthread = GetCurrentThread();
    thread->idthread = GetCurrentThreadId();

    /* BUGFIX: the name is formatted with an unbounded sprintf() while the
     * strlen() guard only bounds the un-expanded format string; a name
     * containing conversion specifiers could expand past obj_name and
     * overflow the buffer.  Use bounded pj_ansi_snprintf() instead.
     */
    if (cstr_thread_name && pj_strlen(&thread_name) < sizeof(thread->obj_name)-1)
        pj_ansi_snprintf(thread->obj_name, sizeof(thread->obj_name),
                         cstr_thread_name, thread->idthread);
    else
        pj_ansi_snprintf(thread->obj_name, sizeof(thread->obj_name),
                         "thr%p", (void*)thread->idthread);

    pj_thread_local_set(thread_tls_id, thread);

    return thread;
}
/* Register socket to ioqueue */ static pj_status_t register_to_ioqueue(struct udp_transport *tp) { pj_ioqueue_t *ioqueue; pj_ioqueue_callback ioqueue_cb; pj_status_t status; /* Ignore if already registered */ if (tp->key != NULL) return PJ_SUCCESS; /* Create group lock */ status = pj_grp_lock_create(tp->base.pool, NULL, &tp->grp_lock); if (status != PJ_SUCCESS) return status; pj_grp_lock_add_ref(tp->grp_lock); pj_grp_lock_add_handler(tp->grp_lock, tp->base.pool, tp, &udp_on_destroy); /* Register to ioqueue. */ ioqueue = pjsip_endpt_get_ioqueue(tp->base.endpt); pj_memset(&ioqueue_cb, 0, sizeof(ioqueue_cb)); ioqueue_cb.on_read_complete = &udp_on_read_complete; ioqueue_cb.on_write_complete = &udp_on_write_complete; return pj_ioqueue_register_sock2(tp->base.pool, ioqueue, tp->sock, tp->grp_lock, tp, &ioqueue_cb, &tp->key); }
/*
 * pj_init(void).
 * Init PJLIB!
 *
 * Initializes the Symbian port of PJLIB: the main thread descriptor,
 * the PjSymbianOS singleton, logging, and the no-memory exception id.
 * Subsequent calls only bump the init reference counter.
 */
PJ_DEF(pj_status_t) pj_init(void)
{
    char stack_ptr;
    pj_status_t status;

    /* Check if PJLIB have been initialized */
    if (initialized) {
        ++initialized;
        return PJ_SUCCESS;
    }

    /* Init main thread.
     * BUGFIX: zero the descriptor BEFORE copying the name in; previously
     * the pj_memset() followed the strcpy() and wiped "pjthread" again.
     */
    pj_memset(&main_thread, 0, sizeof(main_thread));
    pj_ansi_strcpy(main_thread.obj_name, "pjthread");

    // Initialize PjSymbianOS instance
    PjSymbianOS *os = PjSymbianOS::Instance();

    PJ_LOG(4,(THIS_FILE, "Initializing PJLIB for Symbian OS.."));

    TInt err;
    err = os->Initialize();
    if (err != KErrNone)
        return PJ_RETURN_OS_ERROR(err);

    /* Init logging */
    pj_log_init();

    /* Initialize exception ID for the pool.
     * Must do so after critical section is configured.
     */
    status = pj_exception_id_alloc("PJLIB/No memory", &PJ_NO_MEMORY_EXCEPTION);
    if (status != PJ_SUCCESS)
        goto on_error;

#if defined(PJ_OS_HAS_CHECK_STACK) && PJ_OS_HAS_CHECK_STACK!=0
    main_thread.stk_start = &stack_ptr;
    main_thread.stk_size = 0xFFFFFFFFUL;
    main_thread.stk_max_usage = 0;
#else
    stack_ptr = '\0';
#endif

    /* Flag PJLIB as initialized */
    ++initialized;
    pj_assert(initialized == 1);

    PJ_LOG(5,(THIS_FILE, "PJLIB initialized."));
    return PJ_SUCCESS;

on_error:
    pj_shutdown();
    /* BUGFIX: the only jump here is a failed pj_exception_id_alloc(), at
     * which point 'err' is still KErrNone; return the real pj_status_t
     * instead of PJ_RETURN_OS_ERROR(err) which would report success-ish
     * garbage.
     */
    return status;
}
/* Register socket to ioqueue */ static pj_status_t register_to_ioqueue(struct udp_transport *tp) { pj_ioqueue_t *ioqueue; pj_ioqueue_callback ioqueue_cb; /* Ignore if already registered */ if (tp->key != NULL) return PJ_SUCCESS; /* Register to ioqueue. */ ioqueue = pjsip_endpt_get_ioqueue(tp->base.endpt); pj_memset(&ioqueue_cb, 0, sizeof(ioqueue_cb)); ioqueue_cb.on_read_complete = &udp_on_read_complete; ioqueue_cb.on_write_complete = &udp_on_write_complete; return pj_ioqueue_register_sock(tp->base.pool, ioqueue, tp->sock, tp, &ioqueue_cb, &tp->key); }
pj_status_t pj_thread_init(void) { #if PJ_HAS_THREADS pj_memset(&main_thread, 0, sizeof(main_thread)); main_thread.thread = pthread_self(); sprintf(main_thread.obj_name, "thr%p", &main_thread); thread_tls_id = pj_thread_local_alloc(); if (thread_tls_id == -1) { return -1; } pj_thread_local_set(thread_tls_id, &main_thread); return PJ_OK; #else PJ_LOG(2,(THIS_FILE, "Thread init error. Threading is not enabled!")); return -1; #endif }
/* Core logging entry point: format a prefix (level text, date/time,
 * sender, thread id, indentation -- all controlled by the global
 * 'log_decor' bitmask) into a stack buffer, append the user message,
 * then hand the finished line to the installed log_writer callback.
 *
 * The 'pre' pointer is bumped through log_buffer as each decoration is
 * appended; statement order is significant throughout.
 */
PJ_DEF(void) pj_log( const char *sender, int level, const char *format, va_list marker)
{
    pj_time_val now;
    pj_parsed_time ptime;
    char *pre;
#if PJ_LOG_USE_STACK_BUFFER
    char log_buffer[PJ_LOG_MAX_SIZE];
#endif
    int saved_level, len, print_len, indent;

    PJ_CHECK_STACK();

    if (level > pj_log_max_level)
        return;

    if (is_logging_suspended())
        return;

    /* Temporarily disable logging for this thread. Some of PJLIB APIs that
     * this function calls below will recursively call the logging function
     * back, hence it will cause infinite recursive calls if we allow that.
     */
    suspend_logging(&saved_level);

    /* Get current date/time. */
    pj_gettimeofday(&now);
    pj_time_decode(&now, &ptime);

    pre = log_buffer;
    if (log_decor & PJ_LOG_HAS_LEVEL_TEXT) {
        /* Every entry is exactly 6 characters, hence the fixed 'pre += 6'.
         * NOTE(review): assumes 'level' is within 0..6 here -- confirm the
         * pj_log_max_level guard above always bounds it.
         */
        static const char *ltexts[] = { "FATAL:", "ERROR:", " WARN:", " INFO:", "DEBUG:", "TRACE:", "DETRC:"};
        pj_ansi_strcpy(pre, ltexts[level]);
        pre += 6;
    }
    if (log_decor & PJ_LOG_HAS_DAY_NAME) {
        /* Three-letter day names, hence 'pre += 3'. */
        static const char *wdays[] = { "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"};
        pj_ansi_strcpy(pre, wdays[ptime.wday]);
        pre += 3;
    }
    if (log_decor & PJ_LOG_HAS_YEAR) {
        if (pre!=log_buffer) *pre++ = ' ';
        pre += pj_utoa(ptime.year, pre);
    }
    if (log_decor & PJ_LOG_HAS_MONTH) {
        *pre++ = '-';
        pre += pj_utoa_pad(ptime.mon+1, pre, 2, '0');
    }
    if (log_decor & PJ_LOG_HAS_DAY_OF_MON) {
        *pre++ = '-';
        pre += pj_utoa_pad(ptime.day, pre, 2, '0');
    }
    if (log_decor & PJ_LOG_HAS_TIME) {
        if (pre!=log_buffer) *pre++ = ' ';
        pre += pj_utoa_pad(ptime.hour, pre, 2, '0');
        *pre++ = ':';
        pre += pj_utoa_pad(ptime.min, pre, 2, '0');
        *pre++ = ':';
        pre += pj_utoa_pad(ptime.sec, pre, 2, '0');
    }
    if (log_decor & PJ_LOG_HAS_MICRO_SEC) {
        *pre++ = '.';
        pre += pj_utoa_pad(ptime.msec, pre, 3, '0');
    }
    if (log_decor & PJ_LOG_HAS_SENDER) {
        /* Right-align/truncate sender into a fixed-width column. */
        enum { SENDER_WIDTH = 14 };
        int sender_len = strlen(sender);
        if (pre!=log_buffer) *pre++ = ' ';
        if (sender_len <= SENDER_WIDTH) {
            while (sender_len < SENDER_WIDTH) *pre++ = ' ', ++sender_len;
            while (*sender) *pre++ = *sender++;
        } else {
            int i;
            for (i=0; i<SENDER_WIDTH; ++i) *pre++ = *sender++;
        }
    }
    if (log_decor & PJ_LOG_HAS_THREAD_ID) {
        /* Same fixed-width treatment for the thread name. */
        enum { THREAD_WIDTH = 12 };
        const char *thread_name = pj_thread_get_name(pj_thread_this());
        int thread_len = strlen(thread_name);
        *pre++ = ' ';
        if (thread_len <= THREAD_WIDTH) {
            while (thread_len < THREAD_WIDTH) *pre++ = ' ', ++thread_len;
            while (*thread_name) *pre++ = *thread_name++;
        } else {
            int i;
            for (i=0; i<THREAD_WIDTH; ++i) *pre++ = *thread_name++;
        }
    }
    if (log_decor != 0 && log_decor != PJ_LOG_HAS_NEWLINE)
        *pre++ = ' ';
    if (log_decor & PJ_LOG_HAS_THREAD_SWC) {
        /* Emit '!' when the logging thread changed since the last call. */
        void *current_thread = (void*)pj_thread_this();
        if (current_thread != g_last_thread) {
            *pre++ = '!';
            g_last_thread = current_thread;
        } else {
            *pre++ = ' ';
        }
    } else if (log_decor & PJ_LOG_HAS_SPACE) {
        *pre++ = ' ';
    }
#if PJ_LOG_ENABLE_INDENT
    if (log_decor & PJ_LOG_HAS_INDENT) {
        indent = log_get_indent();
        if (indent > 0) {
            pj_memset(pre, PJ_LOG_INDENT_CHAR, indent);
            pre += indent;
        }
    }
#endif
    len = pre - log_buffer;

    /* Print the whole message to the string log_buffer. */
    print_len = pj_ansi_vsnprintf(pre, sizeof(log_buffer)-len, format, marker);
    if (print_len < 0) {
        /* Formatting failed (message too long); force level 1 so the
         * substituted error text is not filtered out.
         */
        level = 1;
        print_len = pj_ansi_snprintf(pre, sizeof(log_buffer)-len, "<logging error: msg too long>");
    }
    len = len + print_len;
    if (len > 0 && len < (int)sizeof(log_buffer)-2) {
        /* Message fits: append optional CR/LF and terminate. */
        if (log_decor & PJ_LOG_HAS_CR) {
            log_buffer[len++] = '\r';
        }
        if (log_decor & PJ_LOG_HAS_NEWLINE) {
            log_buffer[len++] = '\n';
        }
        log_buffer[len] = '\0';
    } else {
        /* Message was truncated: overwrite the buffer tail in place. */
        len = sizeof(log_buffer)-1;
        if (log_decor & PJ_LOG_HAS_CR) {
            log_buffer[sizeof(log_buffer)-3] = '\r';
        }
        if (log_decor & PJ_LOG_HAS_NEWLINE) {
            log_buffer[sizeof(log_buffer)-2] = '\n';
        }
        log_buffer[sizeof(log_buffer)-1] = '\0';
    }

    /* It should be safe to resume logging at this point. Application can
     * recursively call the logging function inside the callback.
     */
    resume_logging(&saved_level);

    if (log_writer)
        (*log_writer)(level, log_buffer, len);
}
/* Resolve the externally-mapped address of each socket in 'sock[]' by
 * sending STUN Binding Requests to one or two servers and collecting
 * the (XOR-)MAPPED-ADDRESS attributes from the responses.
 *
 * Requests are retransmitted up to MAX_REQUEST times using the
 * stun_timer[] backoff schedule; per-socket/per-server results are
 * collected in 'rec'.  Responses are matched to requests by stamping
 * the socket/server indices into tsx[2]/tsx[3] of the transaction id.
 */
PJ_DEF(pj_status_t) pjstun_get_mapped_addr( pj_pool_factory *pf, int sock_cnt, pj_sock_t sock[], const pj_str_t *srv1, int port1, const pj_str_t *srv2, int port2, pj_sockaddr_in mapped_addr[])
{
    unsigned srv_cnt;
    pj_sockaddr_in srv_addr[2];
    int i, j, send_cnt = 0, nfds;
    pj_pool_t *pool;
    /* Per-socket record of the mapped address reported by each server. */
    struct query_rec {
        struct {
            pj_uint32_t mapped_addr;
            pj_uint32_t mapped_port;
        } srv[2];
    } *rec;
    void *out_msg;
    pj_size_t out_msg_len;
    int wait_resp = 0;
    pj_status_t status;

    PJ_CHECK_STACK();

    TRACE_((THIS_FILE, "Entering pjstun_get_mapped_addr()"));

    /* Create pool. */
    pool = pj_pool_create(pf, "stun%p", 400, 400, NULL);
    if (!pool)
        return PJ_ENOMEM;

    /* Allocate client records */
    rec = (struct query_rec*) pj_pool_calloc(pool, sock_cnt, sizeof(*rec));
    if (!rec) {
        status = PJ_ENOMEM;
        goto on_error;
    }

    TRACE_((THIS_FILE, " Memory allocated."));

    /* Create the outgoing BIND REQUEST message template */
    status = pjstun_create_bind_req( pool, &out_msg, &out_msg_len, pj_rand(), pj_rand());
    if (status != PJ_SUCCESS)
        goto on_error;

    TRACE_((THIS_FILE, " Binding request created."));

    /* Resolve servers. */
    status = pj_sockaddr_in_init(&srv_addr[0], srv1, (pj_uint16_t)port1);
    if (status != PJ_SUCCESS)
        goto on_error;
    srv_cnt = 1;
    /* The second server is optional; only use it when it resolves to a
     * different address:port than the first.
     */
    if (srv2 && port2) {
        status = pj_sockaddr_in_init(&srv_addr[1], srv2, (pj_uint16_t)port2);
        if (status != PJ_SUCCESS)
            goto on_error;
        if (srv_addr[1].sin_addr.s_addr != srv_addr[0].sin_addr.s_addr && srv_addr[1].sin_port != srv_addr[0].sin_port) {
            srv_cnt++;
        }
    }

    TRACE_((THIS_FILE, " Server initialized, using %d server(s)", srv_cnt));

    /* Init mapped addresses to zero */
    pj_memset(mapped_addr, 0, sock_cnt * sizeof(pj_sockaddr_in));

    /* We need these many responses */
    wait_resp = sock_cnt * srv_cnt;

    TRACE_((THIS_FILE, " Done initialization."));

#if defined(PJ_SELECT_NEEDS_NFDS) && PJ_SELECT_NEEDS_NFDS!=0
    /* Compute the highest descriptor for select() on platforms that
     * require a real nfds value.
     */
    nfds = -1;
    for (i=0; i<sock_cnt; ++i) {
        if (sock[i] > nfds) {
            nfds = sock[i];
        }
    }
#else
    nfds = FD_SETSIZE-1;
#endif

    /* Main retransmission loop. */
    for (send_cnt=0; send_cnt<MAX_REQUEST; ++send_cnt) {
        pj_time_val next_tx, now;
        pj_fd_set_t r;
        int select_rc;

        PJ_FD_ZERO(&r);

        /* Send messages to servers that has not given us response. */
        for (i=0; i<sock_cnt && status==PJ_SUCCESS; ++i) {
            for (j=0; j<srv_cnt && status==PJ_SUCCESS; ++j) {
                pjstun_msg_hdr *msg_hdr = (pjstun_msg_hdr*) out_msg;
                pj_ssize_t sent_len;

                if (rec[i].srv[j].mapped_port != 0)
                    continue;

                /* Modify message so that we can distinguish response. */
                msg_hdr->tsx[2] = pj_htonl(i);
                msg_hdr->tsx[3] = pj_htonl(j);

                /* Send! */
                sent_len = out_msg_len;
                status = pj_sock_sendto(sock[i], out_msg, &sent_len, 0, (pj_sockaddr_t*)&srv_addr[j], sizeof(pj_sockaddr_in));
            }
        }

        /* All requests sent.
         * The loop below will wait for responses until all responses have
         * been received (i.e. wait_resp==0) or timeout occurs, which then
         * we'll go to the next retransmission iteration.
         */
        TRACE_((THIS_FILE, " Request(s) sent, counter=%d", send_cnt));

        /* Calculate time of next retransmission. */
        pj_gettimeofday(&next_tx);
        next_tx.sec += (stun_timer[send_cnt]/1000);
        next_tx.msec += (stun_timer[send_cnt]%1000);
        pj_time_val_normalize(&next_tx);

        for (pj_gettimeofday(&now), select_rc=1; status==PJ_SUCCESS && select_rc>=1 && wait_resp>0 && PJ_TIME_VAL_LT(now, next_tx); pj_gettimeofday(&now)) {
            pj_time_val timeout;

            timeout = next_tx;
            PJ_TIME_VAL_SUB(timeout, now);

            for (i=0; i<sock_cnt; ++i) {
                PJ_FD_SET(sock[i], &r);
            }

            select_rc = pj_sock_select(nfds+1, &r, NULL, NULL, &timeout);
            TRACE_((THIS_FILE, " select() rc=%d", select_rc));

            if (select_rc < 1)
                continue;

            for (i=0; i<sock_cnt; ++i) {
                int sock_idx, srv_idx;
                pj_ssize_t len;
                pjstun_msg msg;
                pj_sockaddr_in addr;
                int addrlen = sizeof(addr);
                pjstun_mapped_addr_attr *attr;
                char recv_buf[128];

                if (!PJ_FD_ISSET(sock[i], &r))
                    continue;

                len = sizeof(recv_buf);
                status = pj_sock_recvfrom( sock[i], recv_buf, &len, 0, (pj_sockaddr_t*)&addr, &addrlen);

                if (status != PJ_SUCCESS) {
                    char errmsg[PJ_ERR_MSG_SIZE];

                    PJ_LOG(4,(THIS_FILE, "recvfrom() error ignored: %s", pj_strerror(status, errmsg,sizeof(errmsg)).ptr));

                    /* Ignore non-PJ_SUCCESS status.
                     * It possible that other SIP entity is currently
                     * sending SIP request to us, and because SIP message
                     * is larger than STUN, we could get EMSGSIZE when
                     * we call recvfrom().
                     */
                    status = PJ_SUCCESS;
                    continue;
                }

                status = pjstun_parse_msg(recv_buf, len, &msg);
                if (status != PJ_SUCCESS) {
                    char errmsg[PJ_ERR_MSG_SIZE];

                    PJ_LOG(4,(THIS_FILE, "STUN parsing error ignored: %s", pj_strerror(status, errmsg,sizeof(errmsg)).ptr));

                    /* Also ignore non-successful parsing. This may not
                     * be STUN response at all. See the comment above.
                     */
                    status = PJ_SUCCESS;
                    continue;
                }

                /* Recover the request identity we stamped into the
                 * transaction id before sending.
                 */
                sock_idx = pj_ntohl(msg.hdr->tsx[2]);
                srv_idx = pj_ntohl(msg.hdr->tsx[3]);

                if (sock_idx<0 || sock_idx>=sock_cnt || sock_idx!=i || srv_idx<0 || srv_idx>=2) {
                    status = PJLIB_UTIL_ESTUNININDEX;
                    continue;
                }

                if (pj_ntohs(msg.hdr->type) != PJSTUN_BINDING_RESPONSE) {
                    status = PJLIB_UTIL_ESTUNNOBINDRES;
                    continue;
                }

                if (rec[sock_idx].srv[srv_idx].mapped_port != 0) {
                    /* Already got response */
                    continue;
                }

                /* From this part, we consider the packet as a valid STUN
                 * response for our request.
                 */
                --wait_resp;

                if (pjstun_msg_find_attr(&msg, PJSTUN_ATTR_ERROR_CODE) != NULL) {
                    status = PJLIB_UTIL_ESTUNRECVERRATTR;
                    continue;
                }

                /* Prefer MAPPED-ADDRESS; fall back to XOR-MAPPED-ADDRESS
                 * (family 1 == IPv4).
                 */
                attr = (pjstun_mapped_addr_attr*) pjstun_msg_find_attr(&msg, PJSTUN_ATTR_MAPPED_ADDR);
                if (!attr) {
                    attr = (pjstun_mapped_addr_attr*) pjstun_msg_find_attr(&msg, PJSTUN_ATTR_XOR_MAPPED_ADDR);
                    if (!attr || attr->family != 1) {
                        status = PJLIB_UTIL_ESTUNNOMAP;
                        continue;
                    }
                }

                rec[sock_idx].srv[srv_idx].mapped_addr = attr->addr;
                rec[sock_idx].srv[srv_idx].mapped_port = attr->port;
                /* XOR-MAPPED-ADDRESS obfuscates the value with the STUN
                 * magic cookie; undo it here.
                 */
                if (pj_ntohs(attr->hdr.type) == PJSTUN_ATTR_XOR_MAPPED_ADDR) {
                    rec[sock_idx].srv[srv_idx].mapped_addr ^= pj_htonl(STUN_MAGIC);
                    rec[sock_idx].srv[srv_idx].mapped_port ^= pj_htons(STUN_MAGIC >> 16);
                }
            }
        }

        /* The best scenario is if all requests have been replied.
         * Then we don't need to go to the next retransmission iteration.
         */
        if (wait_resp <= 0)
            break;
    }
/* Active-socket TCP throughput/correctness test: create a connected
 * socket pair, wrap both ends in active sockets sharing one ioqueue,
 * blast COUNT sequenced packets from asock2 to asock1, then verify that
 * every packet arrived in order.  Returns 0 on success or a negative
 * test-specific error code.
 */
static int tcp_perf_test(void)
{
    enum { COUNT=100000 };
    pj_pool_t *pool = NULL;
    pj_ioqueue_t *ioqueue = NULL;
    pj_sock_t sock1=PJ_INVALID_SOCKET, sock2=PJ_INVALID_SOCKET;
    pj_activesock_t *asock1 = NULL, *asock2 = NULL;
    pj_activesock_cb cb;
    struct tcp_state *state1, *state2;
    unsigned i;
    pj_status_t status;

    pool = pj_pool_create(mem, "tcpperf", 256, 256, NULL);

    /* Build a locally-connected TCP socket pair. */
    status = app_socketpair(pj_AF_INET(), pj_SOCK_STREAM(), 0, &sock1, &sock2);
    if (status != PJ_SUCCESS) {
        status = -100;
        goto on_return;
    }

    status = pj_ioqueue_create(pool, 4, &ioqueue);
    if (status != PJ_SUCCESS) {
        status = -110;
        goto on_return;
    }

    pj_bzero(&cb, sizeof(cb));
    cb.on_data_read = &tcp_on_data_read;
    cb.on_data_sent = &tcp_on_data_sent;

    /* asock1 is the receiving end, asock2 the sender. */
    state1 = PJ_POOL_ZALLOC_T(pool, struct tcp_state);
    status = pj_activesock_create(pool, sock1, pj_SOCK_STREAM(), NULL, ioqueue, &cb, state1, &asock1);
    if (status != PJ_SUCCESS) {
        status = -120;
        goto on_return;
    }

    state2 = PJ_POOL_ZALLOC_T(pool, struct tcp_state);
    status = pj_activesock_create(pool, sock2, pj_SOCK_STREAM(), NULL, ioqueue, &cb, state2, &asock2);
    if (status != PJ_SUCCESS) {
        status = -130;
        goto on_return;
    }

    status = pj_activesock_start_read(asock1, pool, 1000, 0);
    if (status != PJ_SUCCESS) {
        status = -140;
        goto on_return;
    }

    /* Send packet as quickly as possible */
    for (i=0; i<COUNT && !state1->err && !state2->err; ++i) {
        struct tcp_pkt *pkt;
        /* Two op keys are alternated so the previous pending send's key
         * is not reused while it may still be in flight.
         */
        struct send_key send_key[2], *op_key;
        pj_ssize_t len;

        pkt = (struct tcp_pkt*)state2->pkt;
        pkt->signature = SIGNATURE;
        pkt->seq = i;
        pj_memset(pkt->fill, 'a', sizeof(pkt->fill));

        op_key = &send_key[i%2];
        pj_ioqueue_op_key_init(&op_key->op_key, sizeof(*op_key));

        state2->sent = PJ_FALSE;

        len = sizeof(*pkt);
        status = pj_activesock_send(asock2, &op_key->op_key, pkt, &len, 0);
        if (status == PJ_EPENDING) {
            /* Asynchronous send: poll the ioqueue until this packet has
             * actually been flushed out.
             */
            do {
#if PJ_SYMBIAN
                pj_symbianos_poll(-1, -1);
#else
                pj_ioqueue_poll(ioqueue, NULL);
#endif
            } while (!state2->sent);
        } else {
#if PJ_SYMBIAN
            /* The Symbian socket always returns PJ_SUCCESS for TCP send,
             * eventhough the remote end hasn't received the data yet.
             * If we continue sending, eventually send() will block,
             * possibly because the send buffer is full. So we need to
             * poll the ioqueue periodically, to let receiver gets the
             * data.
             */
            pj_symbianos_poll(-1, 0);
#endif
            if (status != PJ_SUCCESS) {
                PJ_LOG(1,("", " err: send status=%d", status));
                status = -180;
                break;
            } else if (status == PJ_SUCCESS) {
                /* Synchronous success must be a complete send. */
                if (len != sizeof(*pkt)) {
                    PJ_LOG(1,("", " err: shouldn't report partial sent"));
                    status = -190;
                    break;
                }
            }
        }
    }

    /* Wait until everything has been sent/received */
    if (state1->next_recv_seq < COUNT) {
#ifdef PJ_SYMBIAN
        while (pj_symbianos_poll(-1, 1000) == PJ_TRUE)
            ;
#else
        pj_time_val delay = {0, 100};
        while (pj_ioqueue_poll(ioqueue, &delay) > 0)
            ;
#endif
    }

    if (status == PJ_EPENDING)
        status = PJ_SUCCESS;

    if (status != 0)
        goto on_return;

    if (state1->err) {
        status = -183;
        goto on_return;
    }
    if (state2->err) {
        status = -186;
        goto on_return;
    }
    /* The receiver callback advances next_recv_seq per in-order packet;
     * anything short of COUNT means packets were lost or reordered.
     */
    if (state1->next_recv_seq != COUNT) {
        PJ_LOG(3,("", " err: only %u packets received, expecting %u", state1->next_recv_seq, COUNT));
        status = -195;
        goto on_return;
    }

on_return:
    /* Tear down in reverse order of creation. */
    if (asock2)
        pj_activesock_close(asock2);
    if (asock1)
        pj_activesock_close(asock1);
    if (ioqueue)
        pj_ioqueue_destroy(ioqueue);
    if (pool)
        pj_pool_release(pool);

    return status;
}
// Convert this timestamp into a Pj_Time_Val by measuring the elapsed
// time from an all-zero reference timestamp.
Pj_Time_Val to_time() const
{
    Pj_Timestamp epoch;
    pj_memset(&epoch, 0, sizeof(epoch));
    return Pj_Time_Val(pj_elapsed_time(&epoch.ts_, &ts_));
}
// Express this timestamp as an elapsed cycle count measured from an
// all-zero reference timestamp.
pj_uint32_t to_cycle() const
{
    Pj_Timestamp origin;
    pj_memset(&origin, 0, sizeof(origin));
    return pj_elapsed_cycle(&origin.ts_, &ts_);
}
/* Resolve the externally-mapped address of each socket in 'sock[]' by
 * sending STUN Binding Requests to two servers and comparing the
 * MAPPED-ADDRESS each one reports; differing answers mean a symmetric
 * NAT (PJLIB_UTIL_ESTUNSYMMETRIC).
 *
 * NOTE(review): this is an older variant of the routine; known issues
 * kept as-is here: wait_resp is decremented even when recvfrom() fails,
 * the response-wait loop condition uses select_rc==1 (stops early when
 * several sockets become readable at once), select() is called with a
 * fixed FD_SETSIZE nfds, and srv2 is resolved unconditionally (must not
 * be NULL).  A later revision of this function addresses these.
 */
PJ_DECL(pj_status_t) pjstun_get_mapped_addr( pj_pool_factory *pf, int sock_cnt, pj_sock_t sock[], const pj_str_t *srv1, int port1, const pj_str_t *srv2, int port2, pj_sockaddr_in mapped_addr[])
{
    pj_sockaddr_in srv_addr[2];
    int i, j, send_cnt = 0;
    pj_pool_t *pool;
    /* Per-socket record of the mapped address reported by each server. */
    struct {
        struct {
            pj_uint32_t mapped_addr;
            pj_uint32_t mapped_port;
        } srv[2];
    } *rec;
    void *out_msg;
    pj_size_t out_msg_len;
    int wait_resp = 0;
    pj_status_t status;

    PJ_CHECK_STACK();

    /* Create pool. */
    pool = pj_pool_create(pf, "stun%p", 1024, 1024, NULL);
    if (!pool)
        return PJ_ENOMEM;

    /* Allocate client records */
    rec = pj_pool_calloc(pool, sock_cnt, sizeof(*rec));
    if (!rec) {
        status = PJ_ENOMEM;
        goto on_error;
    }

    /* Create the outgoing BIND REQUEST message template */
    status = pjstun_create_bind_req( pool, &out_msg, &out_msg_len, pj_rand(), pj_rand());
    if (status != PJ_SUCCESS)
        goto on_error;

    /* Resolve servers. */
    status = pj_sockaddr_in_init(&srv_addr[0], srv1, (pj_uint16_t)port1);
    if (status != PJ_SUCCESS)
        goto on_error;

    status = pj_sockaddr_in_init(&srv_addr[1], srv2, (pj_uint16_t)port2);
    if (status != PJ_SUCCESS)
        goto on_error;

    /* Init mapped addresses to zero */
    pj_memset(mapped_addr, 0, sock_cnt * sizeof(pj_sockaddr_in));

    /* Main retransmission loop. */
    for (send_cnt=0; send_cnt<MAX_REQUEST; ++send_cnt) {
        pj_time_val next_tx, now;
        pj_fd_set_t r;
        int select_rc;

        PJ_FD_ZERO(&r);

        /* Send messages to servers that has not given us response. */
        for (i=0; i<sock_cnt && status==PJ_SUCCESS; ++i) {
            for (j=0; j<2 && status==PJ_SUCCESS; ++j) {
                pjstun_msg_hdr *msg_hdr = out_msg;
                pj_ssize_t sent_len;

                if (rec[i].srv[j].mapped_port != 0)
                    continue;

                /* Modify message so that we can distinguish response. */
                msg_hdr->tsx[2] = pj_htonl(i);
                msg_hdr->tsx[3] = pj_htonl(j);

                /* Send! */
                sent_len = out_msg_len;
                status = pj_sock_sendto(sock[i], out_msg, &sent_len, 0, (pj_sockaddr_t*)&srv_addr[j], sizeof(pj_sockaddr_in));
                if (status == PJ_SUCCESS)
                    ++wait_resp;
            }
        }

        /* All requests sent.
         * The loop below will wait for responses until all responses have
         * been received (i.e. wait_resp==0) or timeout occurs, which then
         * we'll go to the next retransmission iteration.
         */

        /* Calculate time of next retransmission. */
        pj_gettimeofday(&next_tx);
        next_tx.sec += (stun_timer[send_cnt]/1000);
        next_tx.msec += (stun_timer[send_cnt]%1000);
        pj_time_val_normalize(&next_tx);

        for (pj_gettimeofday(&now), select_rc=1; status==PJ_SUCCESS && select_rc==1 && wait_resp>0 && PJ_TIME_VAL_LT(now, next_tx); pj_gettimeofday(&now)) {
            pj_time_val timeout;

            timeout = next_tx;
            PJ_TIME_VAL_SUB(timeout, now);

            for (i=0; i<sock_cnt; ++i) {
                PJ_FD_SET(sock[i], &r);
            }

            select_rc = pj_sock_select(FD_SETSIZE, &r, NULL, NULL, &timeout);
            if (select_rc < 1)
                continue;

            for (i=0; i<sock_cnt; ++i) {
                int sock_idx, srv_idx;
                pj_ssize_t len;
                pjstun_msg msg;
                pj_sockaddr_in addr;
                int addrlen = sizeof(addr);
                pjstun_mapped_addr_attr *attr;
                char recv_buf[128];

                if (!PJ_FD_ISSET(sock[i], &r))
                    continue;

                len = sizeof(recv_buf);
                status = pj_sock_recvfrom( sock[i], recv_buf, &len, 0, (pj_sockaddr_t*)&addr, &addrlen);
                /* NOTE(review): decremented before the status check, so a
                 * failed recvfrom() also consumes a "response" slot.
                 */
                --wait_resp;
                if (status != PJ_SUCCESS)
                    continue;

                status = pjstun_parse_msg(recv_buf, len, &msg);
                if (status != PJ_SUCCESS) {
                    continue;
                }

                /* Recover the request identity we stamped into the
                 * transaction id before sending.
                 */
                sock_idx = pj_ntohl(msg.hdr->tsx[2]);
                srv_idx = pj_ntohl(msg.hdr->tsx[3]);

                if (sock_idx<0 || sock_idx>=sock_cnt || srv_idx<0 || srv_idx>=2) {
                    status = PJLIB_UTIL_ESTUNININDEX;
                    continue;
                }

                if (pj_ntohs(msg.hdr->type) != PJSTUN_BINDING_RESPONSE) {
                    status = PJLIB_UTIL_ESTUNNOBINDRES;
                    continue;
                }

                if (pjstun_msg_find_attr(&msg, PJSTUN_ATTR_ERROR_CODE) != NULL) {
                    status = PJLIB_UTIL_ESTUNRECVERRATTR;
                    continue;
                }

                attr = (void*)pjstun_msg_find_attr(&msg, PJSTUN_ATTR_MAPPED_ADDR);
                if (!attr) {
                    status = PJLIB_UTIL_ESTUNNOMAP;
                    continue;
                }

                rec[sock_idx].srv[srv_idx].mapped_addr = attr->addr;
                rec[sock_idx].srv[srv_idx].mapped_port = attr->port;
            }
        }

        /* The best scenario is if all requests have been replied.
         * Then we don't need to go to the next retransmission iteration.
         */
        if (wait_resp <= 0)
            break;
    }

    /* Both servers must report the same mapping for each socket;
     * otherwise the NAT is symmetric and the result is unusable.
     */
    for (i=0; i<sock_cnt && status==PJ_SUCCESS; ++i) {
        if (rec[i].srv[0].mapped_addr == rec[i].srv[1].mapped_addr && rec[i].srv[0].mapped_port == rec[i].srv[1].mapped_port) {
            mapped_addr[i].sin_family = PJ_AF_INET;
            mapped_addr[i].sin_addr.s_addr = rec[i].srv[0].mapped_addr;
            mapped_addr[i].sin_port = (pj_uint16_t)rec[i].srv[0].mapped_port;

            /* A still-zero record means neither server ever answered. */
            if (rec[i].srv[0].mapped_addr == 0 || rec[i].srv[0].mapped_port == 0) {
                status = PJLIB_UTIL_ESTUNNOTRESPOND;
                break;
            }
        } else {
            status = PJLIB_UTIL_ESTUNSYMMETRIC;
            break;
        }
    }

    pj_pool_release(pool);
    return status;

on_error:
    if (pool) pj_pool_release(pool);
    return status;
}
PJ_DEF(pj_status_t) pjmedia_natnl_stream_create(pj_pool_t *pool, pjsua_call *call, pjmedia_stream_info *si, natnl_stream **stream) { pj_status_t status = PJ_SUCCESS; unsigned strm_idx = 0; strm_idx = call->index; /* TODO: * - Create and start your media stream based on the parameters * in si */ PJ_ASSERT_RETURN(pool, PJ_EINVAL); PJ_LOG(4,(THIS_FILE,"natnl audio channel update..strm_idx=%d", strm_idx)); /* Check if no media is active */ if (si->dir != PJMEDIA_DIR_NONE) { /* Create session based on session info. */ #if 0 pool = pj_pool_create(strm_pool->factory, "strm%p", NATNL_STREAM_SIZE, NATNL_STREAM_INC, NULL); PJ_ASSERT_RETURN(pool != NULL, PJ_ENOMEM); #endif pj_mutex_lock(call->tnl_stream_lock); pj_mutex_lock(call->tnl_stream_lock2); pj_mutex_lock(call->tnl_stream_lock3); // DEAN don't re-create natnl stream if (call->tnl_stream) { *stream = call->tnl_stream; pj_mutex_unlock(call->tnl_stream_lock3); pj_mutex_unlock(call->tnl_stream_lock2); pj_mutex_unlock(call->tnl_stream_lock); return PJ_SUCCESS; } call->tnl_stream = *stream = PJ_POOL_ZALLOC_T(pool, natnl_stream); PJ_ASSERT_RETURN(*stream != NULL, PJ_ENOMEM); (*stream)->call = call; (*stream)->own_pool = pool; (*stream)->med_tp = call->med_tp; pj_memcpy(&(*stream)->rem_addr, &si->rem_addr, sizeof(pj_sockaddr)); pj_list_init(&(*stream)->rbuff); pj_list_init(&(*stream)->gcbuff); pj_get_timestamp(&(*stream)->last_data_or_ka); pj_get_timestamp(&(*stream)->last_data); (*stream)->rbuff_cnt = 0; (*stream)->rx_band = (pj_band_t *)malloc(sizeof(pj_band_t)); (*stream)->tx_band = (pj_band_t *)malloc(sizeof(pj_band_t)); pj_memset((*stream)->rx_band, 0, sizeof(pj_band_t)); pj_memset((*stream)->tx_band, 0, sizeof(pj_band_t)); pj_bandwidthSetLimited((*stream)->rx_band, PJ_FALSE); pj_bandwidthSetLimited((*stream)->tx_band, PJ_FALSE); /* Create mutex to protect jitter buffer: */ status = pj_mutex_create_simple(pool, NULL, &(*stream)->rbuff_mutex); if (status != PJ_SUCCESS) { //pj_pool_t *tmp_pool = (*stream)->own_pool; 
(*stream)->own_pool = NULL; //pj_pool_release(tmp_pool); goto on_return; } status = pj_mutex_create_simple(pool, NULL, &(*stream)->gcbuff_mutex); if (status != PJ_SUCCESS) { (*stream)->own_pool = NULL; goto on_return; } /* Create semaphore */ status = pj_sem_create(pool, "client", 0, 65535, &(*stream)->rbuff_sem); if (status != PJ_SUCCESS) { (*stream)->own_pool = NULL; goto on_return; } // +Roger - Create Send buffer Mutex status = pj_mutex_create_simple(pool, NULL, &(*stream)->sbuff_mutex); if (status != PJ_SUCCESS) { //pj_pool_t *tmp_pool = (*stream)->own_pool; (*stream)->own_pool = NULL; //pj_pool_release(tmp_pool); goto on_return; } //------------------------------------// #if 0 /* Attach our RTP and RTCP callbacks to the media transport */ status = pjmedia_transport_attach(call_med->tp, stream, //call_med, &si->rem_addr, &si->rem_rtcp, pj_sockaddr_get_len(&si->rem_addr), &aud_rtp_cb, &aud_rtcp_cb); #endif } on_return: pj_mutex_unlock(call->tnl_stream_lock3); pj_mutex_unlock(call->tnl_stream_lock2); pj_mutex_unlock(call->tnl_stream_lock); return status; }
/*
 * Print a multipart message body into the supplied buffer.
 *
 * Each part is emitted as:
 *   CRLF "--" boundary CRLF [part headers] CRLF [part body]
 * followed by the closing CRLF "--" boundary "--" CRLF delimiter.
 * When a part's body has a content type set, Content-Type and
 * Content-Length headers are generated automatically; the length value
 * is patched into reserved blanks after the part body has been printed.
 *
 * Returns the number of bytes written, or -1 if the buffer is too small.
 */
static int multipart_print_body(struct pjsip_msg_body *msg_body,
			        char *buf, pj_size_t size)
{
    const struct multipart_data *m_data;
    pj_str_t clen_hdr = { "Content-Length: ", 16};
    pjsip_multipart_part *part;
    char *p = buf, *end = buf+size;

#define SIZE_LEFT()	(end-p)

    m_data = (const struct multipart_data*)msg_body->data;

    /* Fix: return -1 (this function's error value) instead of PJ_EINVAL,
     * which is positive and would be misread by callers as a valid
     * printed length. */
    PJ_ASSERT_RETURN(m_data && !pj_list_empty(&m_data->part_head), -1);

    part = m_data->part_head.next;
    while (part != &m_data->part_head) {
	enum { CLEN_SPACE = 5 };
	char *clen_pos;
	const pjsip_hdr *hdr;

	clen_pos = NULL;

	/* Print delimiter */
	if (SIZE_LEFT() <= (m_data->boundary.slen+8) << 1)
	    return -1;
	*p++ = 13; *p++ = 10; *p++ = '-'; *p++ = '-';
	pj_memcpy(p, m_data->boundary.ptr, m_data->boundary.slen);
	p += m_data->boundary.slen;
	*p++ = 13; *p++ = 10;

	/* Print optional headers; two bytes are reserved for the CRLF. */
	hdr = part->hdr.next;
	while (hdr != &part->hdr) {
	    int printed = pjsip_hdr_print_on((pjsip_hdr*)hdr, p,
					     SIZE_LEFT()-2);
	    if (printed < 0)
		return -1;
	    p += printed;
	    *p++ = '\r';
	    *p++ = '\n';
	    hdr = hdr->next;
	}

	/* Automatically add Content-Type and Content-Length headers,
	 * only if content_type is set in the message body.
	 */
	if (part->body && part->body->content_type.type.slen) {
	    pj_str_t ctype_hdr = { "Content-Type: ", 14};
	    const pjsip_media_type *media = &part->body->content_type;

	    if (pjsip_use_compact_form) {
		ctype_hdr.ptr = "c: ";
		ctype_hdr.slen = 3;
	    }

	    /* Add Content-Type header. */
	    if ( (end-p) < 24 + media->type.slen + media->subtype.slen) {
		return -1;
	    }
	    pj_memcpy(p, ctype_hdr.ptr, ctype_hdr.slen);
	    p += ctype_hdr.slen;
	    p += pjsip_media_type_print(p, (unsigned)(end-p), media);
	    *p++ = '\r';
	    *p++ = '\n';

	    /* Add Content-Length header. */
	    if ((end-p) < clen_hdr.slen + 12 + 2) {
		return -1;
	    }
	    pj_memcpy(p, clen_hdr.ptr, clen_hdr.slen);
	    p += clen_hdr.slen;

	    /* Print blanks after "Content-Length:", this is where we'll
	     * put the content length value after we know the length of
	     * the body.
	     */
	    pj_memset(p, ' ', CLEN_SPACE);
	    clen_pos = p;
	    p += CLEN_SPACE;
	    *p++ = '\r';
	    *p++ = '\n';
	}

	/* Empty newline separating headers from the part body.
	 * Fix: this write was previously unchecked and could overflow
	 * the buffer by two bytes once header printing had consumed the
	 * remaining space. */
	if (SIZE_LEFT() < 2)
	    return -1;
	*p++ = 13; *p++ = 10;

	/* Print the body */
	pj_assert(part->body != NULL);
	if (part->body) {
	    int printed = part->body->print_body(part->body, p,
						 SIZE_LEFT());
	    if (printed < 0)
		return -1;
	    p += printed;

	    /* Now that we have the length of the body, print this to
	     * the Content-Length header.
	     */
	    if (clen_pos) {
		char tmp[16];
		int len;

		len = pj_utoa(printed, tmp);
		if (len > CLEN_SPACE) len = CLEN_SPACE;
		pj_memcpy(clen_pos+CLEN_SPACE-len, tmp, len);
	    }
	}

	part = part->next;
    }

    /* Print closing delimiter */
    if (SIZE_LEFT() < m_data->boundary.slen+8)
	return -1;
    *p++ = 13; *p++ = 10; *p++ = '-'; *p++ = '-';
    pj_memcpy(p, m_data->boundary.ptr, m_data->boundary.slen);
    p += m_data->boundary.slen;
    *p++ = '-'; *p++ = '-'; *p++ = 13; *p++ = 10;

#undef SIZE_LEFT

    return (int)(p - buf);
}
/*
 * Resize and/or rotate a raw video frame, then optionally letterbox it.
 *
 * Pipeline (each stage ping-pongs between src_buf and conv->conv_buf via
 * swap()): optional converter resize -> optional I420 rotation (libyuv)
 * -> optional aspect-ratio padding with black. *result is set to
 * whichever buffer holds the final image (it may be src_buf itself).
 *
 * The plane arithmetic below assumes I420 planar layout: a w*h Y plane
 * followed by two (w/2)*(h/2) chroma planes (as encoded by the
 * src+p_len / p_len/4 offsets).
 *
 * Returns PJ_SUCCESS, PJ_EINVALIDOP when no converter has been created,
 * or the converter's error status.
 */
pj_status_t pjmedia_vid_dev_conv_resize_and_rotate(pjmedia_vid_dev_conv *conv,
                                                   void *src_buf,
                                                   void **result)
{
#define swap(a, b) {pj_uint8_t *c = a; a = b; b = c;}
    pj_status_t status;
    pjmedia_frame src_frame, dst_frame;
    pjmedia_rect_size src_size = conv->src_size;
    pj_uint8_t *src = src_buf;
    pj_uint8_t *dst = conv->conv_buf;

    pj_assert(src_buf);

    if (!conv->conv) return PJ_EINVALIDOP;

    if (!conv->match_src_dst) {
        /* We need to resize. */
        src_frame.buf = src;
        dst_frame.buf = dst;
        src_frame.size = conv->src_frame_size;
        dst_frame.size = conv->conv_frame_size;

        status = pjmedia_converter_convert(conv->conv, &src_frame,
                                           &dst_frame);
        if (status != PJ_SUCCESS) {
            PJ_LOG(3, (THIS_FILE, "Failed to convert frame"));
            return status;
        }

        /* From here on, the current image lives in conv_buf at the
         * resized dimensions. */
        src_size = conv->res_size;
        swap(src, dst);
    }

    if (conv->handle_rotation && conv->rotation != PJMEDIA_ORIENT_NATURAL) {
        /* We need to do rotation. */
        if (conv->fmt.id == PJMEDIA_FORMAT_I420) {
            pjmedia_rect_size dst_size = src_size;
            pj_size_t p_len = src_size.w * src_size.h; /* Y plane bytes */

            /* 90/270-degree rotation transposes the frame dimensions. */
            if (conv->rotation == PJMEDIA_ORIENT_ROTATE_90DEG ||
                conv->rotation == PJMEDIA_ORIENT_ROTATE_270DEG)
            {
                dst_size.w = src_size.h;
                dst_size.h = src_size.w;
            }

#if defined(PJMEDIA_HAS_LIBYUV) && PJMEDIA_HAS_LIBYUV != 0
            enum RotationMode mode;
            switch (conv->rotation) {
            case PJMEDIA_ORIENT_ROTATE_90DEG:
                mode = kRotate90;
                break;
            case PJMEDIA_ORIENT_ROTATE_180DEG:
                mode = kRotate180;
                break;
            case PJMEDIA_ORIENT_ROTATE_270DEG:
                mode = kRotate270;
                break;
            default:
                mode = kRotate0;
            }

            /* Y, U, V plane pointers with their strides; chroma planes
             * are quarter-sized and half-stride. */
            I420Rotate(src, src_size.w,
                       src+p_len, src_size.w/2,
                       src+p_len+p_len/4, src_size.w/2,
                       dst, dst_size.w,
                       dst+p_len, dst_size.w/2,
                       dst+p_len+p_len/4, dst_size.w/2,
                       src_size.w, src_size.h, mode);

            swap(src, dst);
#else
            /* Without libyuv the rotation is silently skipped. */
            PJ_UNUSED_ARG(p_len);
            PJ_UNUSED_ARG(dst_size);
#endif
        }
    }

    if (!conv->match_src_dst && conv->maintain_aspect_ratio) {
        /* Center the frame and fill the area with black color */
        if (conv->fmt.id == PJMEDIA_FORMAT_I420) {
            unsigned i = 0;
            pj_uint8_t *pdst = dst;
            pj_uint8_t *psrc = src;
            pj_size_t p_len_src = 0,
                      p_len_dst = conv->wxh; /* destination Y plane size */
            int pad = conv->pad;  /* per-side padding in luma pixels */

            /* Y plane: zero is black for luma. */
            pj_bzero(pdst, p_len_dst);
            if (conv->fit_to_h) {
                /* Fill the left and right with black */
                for (; i < conv->dst_size.h; ++i) {
                    pdst += pad;
                    pj_memcpy(pdst, psrc, conv->rot_size.w);
                    pdst += conv->rot_size.w;
                    psrc += conv->rot_size.w;
                    pdst += pad;
                }
                /* pdst has now advanced past the whole Y plane. */
            } else {
                /* Fill the top and bottom with black */
                p_len_src = conv->rot_size.w * conv->rot_size.h;
                pj_memcpy(pdst + conv->rot_size.w * pad, psrc, p_len_src);
                psrc += p_len_src;
                pdst += p_len_dst;
            }

            /* Fill the U&V components with 0x80 to make it black.
             * Bzero-ing will make the area look green instead.
             */
            pj_memset(pdst, 0x80, p_len_dst/2);
            pad /= 2;  /* chroma planes are subsampled 2x */
            if (conv->fit_to_h) {
                /* Copy U then V rows; both planes are contiguous so one
                 * loop of 2*h half-width rows covers them. */
                p_len_src = conv->rot_size.w / 2;
                for (i = conv->dst_size.h; i > 0; --i) {
                    pdst += pad;
                    pj_memcpy(pdst, psrc, p_len_src);
                    pdst += p_len_src;
                    psrc += p_len_src;
                    pdst += pad;
                }
            } else {
                pj_uint8_t *U, *V;
                pj_size_t gap = conv->rot_size.w * pad / 2;

                /* Quarter of the source Y size = one chroma plane. */
                p_len_src /= 4;
                U = pdst;
                V = U + p_len_dst/4;
                pj_memcpy(U + gap, psrc, p_len_src);
                psrc += p_len_src;
                pj_memcpy(V + gap, psrc, p_len_src);
            }
            swap(src, dst);
        }
    }

    /* After the final swap, "src" points at the buffer holding the
     * finished frame. */
    *result = src;

    return PJ_SUCCESS;
}
/* This function is called when we receive SUBSCRIBE request message for
 * a new subscription.
 *
 * It locates the event package matching the request's Event header,
 * checks the request's Accept specification against the package's
 * accept list, asks the package whether it wants the subscription,
 * creates and registers the subscription record, notifies the
 * application, and finally answers the transaction.
 */
static void on_new_subscription( pjsip_transaction *tsx, pjsip_rx_data *rdata )
{
    package *pkg;
    pj_pool_t *pool;
    pjsip_event_sub *sub = NULL;
    pj_str_t hname;
    int status = 200;
    pj_str_t reason = { NULL, 0 };
    pjsip_tx_data *tdata;
    pjsip_expires_hdr *expires;
    pjsip_accept_hdr *accept;
    pjsip_event_hdr *evhdr;

    /* Get the Event header. */
    hname = pj_str("Event");
    evhdr = pjsip_msg_find_hdr_by_name(rdata->msg, &hname, NULL);
    if (!evhdr) {
	status = 400;
	reason = pj_str("No Event header in request");
	goto send_response;
    }

    /* Find corresponding package.
     * We don't lock the manager's mutex since we assume the package list
     * won't change once the application is running!
     */
    pkg = mgr.pkg_list.next;
    while (pkg != &mgr.pkg_list) {
	if (pj_stricmp(&pkg->event, &evhdr->event_type) == 0)
	    break;
	pkg = pkg->next;
    }
    if (pkg == &mgr.pkg_list) {
	/* Event type is not supported by any packages! */
	status = 489;
	reason = pj_str("Bad Event");
	goto send_response;
    }

    /* First check that the Accept specification matches the
     * package's Accept types.
     */
    accept = pjsip_msg_find_hdr(rdata->msg, PJSIP_H_ACCEPT, NULL);
    if (accept) {
	unsigned i;
	pj_str_t *content_type = NULL;

	for (i=0; i<accept->count && !content_type; ++i) {
	    int j;
	    for (j=0; j<pkg->accept_cnt; ++j) {
		if (pj_stricmp(&accept->values[i], &pkg->accept[j])==0) {
		    content_type = &pkg->accept[j];
		    break;
		}
	    }
	}

	if (!content_type) {
	    status = PJSIP_SC_NOT_ACCEPTABLE_HERE;
	    goto send_response;
	}
    }

    /* Check whether the package wants to accept the subscription. */
    pj_assert(pkg->cb.on_query_subscribe != NULL);
    (*pkg->cb.on_query_subscribe)(rdata, &status);
    if (!PJSIP_IS_STATUS_IN_CLASS(status,200))
	goto send_response;

    /* Create new subscription record. */
    pool = pjsip_endpt_create_pool(tsx->endpt, "esub",
				   SUB_POOL_SIZE, SUB_POOL_INC);
    if (!pool) {
	status = 500;
	goto send_response;
    }
    sub = pj_pool_calloc(pool, 1, sizeof(*sub));
    if (!sub) {
	/* Fix: the allocation result was previously used unchecked. */
	pjsip_endpt_destroy_pool(tsx->endpt, pool);
	status = 500;
	goto send_response;
    }
    sub->pool = pool;
    sub->mutex = pj_mutex_create(pool, "esub", PJ_MUTEX_RECURSE);
    if (!sub->mutex) {
	status = 500;
	goto send_response;
    }

    PJ_LOG(4,(THIS_FILE, "event_sub%p: notifier is created.", sub));

    /* Start locking mutex. */
    pj_mutex_lock(sub->mutex);

    /* Init UAS subscription */
    sub->endpt = tsx->endpt;
    sub->role = PJSIP_ROLE_UAS;
    sub->state = PJSIP_EVENT_SUB_STATE_PENDING;
    sub->state_str = state[sub->state];
    pj_list_init(&sub->auth_sess);
    pj_list_init(&sub->route_set);

    /* Our local From is the remote To (and vice versa); generate a
     * local tag if the request's To header has none yet. */
    sub->from = pjsip_hdr_clone(pool, rdata->to);
    pjsip_fromto_set_from(sub->from);
    if (sub->from->tag.slen == 0) {
	pj_create_unique_string(pool, &sub->from->tag);
	rdata->to->tag = sub->from->tag;
    }
    sub->to = pjsip_hdr_clone(pool, rdata->from);
    pjsip_fromto_set_to(sub->to);
    sub->contact = pjsip_contact_hdr_create(pool);
    sub->contact->uri = sub->from->uri;
    sub->call_id = pjsip_cid_hdr_create(pool);
    pj_strdup(pool, &sub->call_id->id, &rdata->call_id);
    sub->cseq = pj_rand() % 0xFFFF;

    expires = pjsip_msg_find_hdr( rdata->msg, PJSIP_H_EXPIRES, NULL);
    if (expires) {
	sub->default_interval = expires->ivalue;
	if (sub->default_interval > 0 &&
	    sub->default_interval < SECONDS_BEFORE_EXPIRY)
	{
	    status = 423; /* Interval too short. */
	    goto send_response;
	}
    } else {
	sub->default_interval = 600;
    }

    /* Clone Event header. */
    sub->event = pjsip_hdr_clone(pool, evhdr);

    /* Register to hash table. */
    create_subscriber_key(&sub->key, pool, PJSIP_ROLE_UAS,
			  &sub->call_id->id, &sub->from->tag);
    pj_mutex_lock(mgr.mutex);
    pj_hash_set(pool, mgr.ht, sub->key.ptr, sub->key.slen, sub);
    pj_mutex_unlock(mgr.mutex);

    /* Set timer where subscription will expire only when expires<>0.
     * Subscriber may send new subscription with expires==0.
     */
    if (sub->default_interval != 0) {
	sub_schedule_uas_expire(sub,
				sub->default_interval -
				    SECONDS_BEFORE_EXPIRY);
    }

    /* Notify application. */
    if (pkg->cb.on_subscribe) {
	pjsip_event_sub_cb *cb = NULL;

	sub->pending_tsx++;
	(*pkg->cb.on_subscribe)(sub, rdata, &cb, &sub->default_interval);
	sub->pending_tsx--;

	if (cb == NULL)
	    pj_memset(&sub->cb, 0, sizeof(*cb));
	else
	    pj_memcpy(&sub->cb, cb, sizeof(*cb));
    }

send_response:
    /* Fix: sub may still be NULL here (missing Event header, bad event,
     * or package rejection); the original log dereferenced it. */
    PJ_LOG(4,(THIS_FILE, "event_sub%p (%s)(UAS): status=%d",
	      sub, (sub ? state[sub->state].ptr : "null"), status));

    tdata = pjsip_endpt_create_response( tsx->endpt, rdata, status);
    if (tdata) {
	if (reason.slen) {
	    /* Customize reason text. */
	    tdata->msg->line.status.reason = reason;
	}
	if (PJSIP_IS_STATUS_IN_CLASS(status,200)) {
	    /* Add Expires header (sub is always non-NULL for 2xx). */
	    pjsip_expires_hdr *hdr;

	    hdr = pjsip_expires_hdr_create(tdata->pool);
	    hdr->ivalue = sub->default_interval;
	    pjsip_msg_add_hdr( tdata->msg, (pjsip_hdr*)hdr );
	}
	if (status == 423) {
	    /* Add Min-Expires header. */
	    pjsip_min_expires_hdr *hdr;

	    hdr = pjsip_min_expires_hdr_create(tdata->pool);
	    hdr->ivalue = SECONDS_BEFORE_EXPIRY;
	    pjsip_msg_add_hdr( tdata->msg, (pjsip_hdr*)hdr);
	}
	if (status == 489 ||
	    status == PJSIP_SC_NOT_ACCEPTABLE_HERE ||
	    PJSIP_IS_STATUS_IN_CLASS(status,200))
	{
	    /* Add Allow-Events header. */
	    pjsip_hdr *hdr;

	    hdr = pjsip_hdr_shallow_clone(tdata->pool, mgr.allow_events);
	    pjsip_msg_add_hdr(tdata->msg, hdr);

	    /* Should add Accept header?. */
	}

	pjsip_tsx_on_tx_msg(tsx, tdata);
    }

    /* If received new subscription with expires=0, terminate. */
    if (sub && sub->default_interval == 0) {
	pj_assert(sub->state == PJSIP_EVENT_SUB_STATE_TERMINATED);
	if (sub->cb.on_sub_terminated) {
	    pj_str_t reason = { "timeout", 7 };
	    (*sub->cb.on_sub_terminated)(sub, &reason);
	}
    }

    /* Destroy the half-built subscription on failure, otherwise just
     * release its mutex. */
    if (!PJSIP_IS_STATUS_IN_CLASS(status,200) ||
	(sub && sub->delete_flag))
    {
	if (sub && sub->mutex) {
	    pjsip_event_sub_destroy(sub);
	} else if (sub) {
	    pjsip_endpt_destroy_pool(tsx->endpt, sub->pool);
	}
    } else {
	pj_assert(status >= 200);
	pj_mutex_unlock(sub->mutex);
    }
}