/* Playback callback: gather frame-interval statistics for the playback
 * stream while the test is running, and always emit a silent frame.
 */
static pj_status_t play_cb(void *user_data, pjmedia_frame *frame)
{
    struct test_data *td = (struct test_data *)user_data;
    struct stream_data *sd = &td->playback_data;

    pj_mutex_lock(td->mutex);

    if (td->running) {
        /* Save last timestamp seen (to calculate drift) */
        sd->last_timestamp = frame->timestamp.u32.lo;

        if (sd->last_called.u64 != 0) {
            pj_timestamp now;
            unsigned interval;

            /* Measure the interval since the previous callback and fold
             * it into the running statistics. */
            pj_get_timestamp(&now);
            interval = pj_elapsed_usec(&sd->last_called, &now);
            sd->last_called = now;

            pj_math_stat_update(&sd->delay, interval);
        } else {
            /* First invocation while running: initialize bookkeeping. */
            pj_get_timestamp(&sd->last_called);
            pj_math_stat_init(&sd->delay);
            sd->first_timestamp = frame->timestamp.u32.lo;
        }
    }

    /* Whether running or not, always play silence. */
    pj_bzero(frame->buf, frame->size);
    pj_mutex_unlock(td->mutex);

    return PJ_SUCCESS;
}
/* Benchmark pjmedia_move_samples(): repeatedly shift samples within a
 * frame buffer and report throughput (Msamples per second) plus the
 * estimated CPU load for the current CLOCK_RATE.
 */
static void mem_test(pj_pool_t *pool)
{
    char unused[1024];
    short *frame = pj_pool_alloc(pool, 240+4*160);
    pj_timestamp elapsed, zero, t1, t2;
    unsigned samples = 0;

    elapsed.u64 = 0;
    while (samples < 50000000) {
        pj_get_timestamp(&t1);
        pjmedia_move_samples(frame, frame+160, 240+2*160);
        pj_get_timestamp(&t2);
        pj_sub_timestamp(&t2, &t1);
        elapsed.u64 += t2.u64;

        /* Touch an unrelated buffer between iterations so the measurement
         * is less cache-optimistic. */
        memset(unused, 0, sizeof(unused));

        samples += 160;
    }

    zero.u64 = 0;
    zero.u64 = pj_elapsed_usec(&zero, &elapsed);
    if (zero.u64 == 0) {
        /* BUGFIX: guard division by zero when the timer resolution is too
         * coarse to register any elapsed time. */
        zero.u64 = 1;
    }
    zero.u64 = samples * PJ_INT64(1000000) / zero.u64;

    assert(zero.u32.hi == 0);

    PJ_LOG(3,("test.c", "Processing: %f Msamples per second",
              zero.u32.lo/1000000.0));
    PJ_LOG(3,("test.c", "CPU load for current settings: %f%%",
              CLOCK_RATE * 100.0 / zero.u32.lo));
}
/*
 * transport_rt_test() - multithreaded round-trip benchmark.
 *
 * Spawns THREADS (4) worker threads, each driving its own stateless
 * request/response exchange against target_url for INTERVAL (10) seconds,
 * then aggregates sent/received counts and the average round-trip time.
 * Per-thread state lives in the file-global rt_test_data[] array; each
 * thread gets a unique Call-ID ("RT-Call-Id/" + digit) so responses can be
 * matched back to the right slot by the rt_module handler. Each thread is
 * created suspended, then resumed together with its first tx timer so all
 * threads start at roughly the same time.
 *
 * Returns 0 on success or a negative error code on setup failure; on
 * success *lost is set to the number of requests that never received a
 * response.
 *
 * NOTE(review): the error returns after pool creation (-615, -620) do not
 * release the pool nor restore the message logger -- tolerable for a test
 * harness that aborts the run on failure, but worth confirming.
 * NOTE(review): pj_ansi_strcpy() into rt_target_uri assumes target_url
 * fits the file-global buffer -- TODO confirm callers guarantee this.
 */
int transport_rt_test( pjsip_transport_type_e tp_type, pjsip_transport *ref_tp, char *target_url, int *lost) { enum { THREADS = 4, INTERVAL = 10 }; int i; pj_status_t status; pj_pool_t *pool; pj_bool_t logger_enabled; pj_timestamp zero_time, total_time; unsigned usec_rt; unsigned total_sent; unsigned total_recv; PJ_UNUSED_ARG(tp_type); PJ_UNUSED_ARG(ref_tp); PJ_LOG(3,(THIS_FILE, " multithreaded round-trip test (%d threads)...", THREADS)); PJ_LOG(3,(THIS_FILE, " this will take approx %d seconds, please wait..", INTERVAL)); /* Make sure msg logger is disabled. */ logger_enabled = msg_logger_set_enabled(0); /* Register module (if not yet registered) */ if (rt_module.id == -1) { status = pjsip_endpt_register_module( endpt, &rt_module ); if (status != PJ_SUCCESS) { app_perror(" error: unable to register module", status); return -600; } } /* Create pool for this test. */ pool = pjsip_endpt_create_pool(endpt, NULL, 4000, 4000); if (!pool) return -610; /* Initialize static test data. */ pj_ansi_strcpy(rt_target_uri, target_url); rt_call_id = pj_str("RT-Call-Id/"); rt_stop = PJ_FALSE; /* Initialize thread data. */ for (i=0; i<THREADS; ++i) { char buf[1]; pj_str_t str_id; pj_strset(&str_id, buf, 1); pj_bzero(&rt_test_data[i], sizeof(rt_test_data[i])); /* Init timer entry */ rt_test_data[i].tx_timer.id = i; rt_test_data[i].tx_timer.cb = &rt_tx_timer; rt_test_data[i].timeout_timer.id = i; rt_test_data[i].timeout_timer.cb = &rt_timeout_timer; /* Generate Call-ID for each thread. */ rt_test_data[i].call_id.ptr = (char*) pj_pool_alloc(pool, rt_call_id.slen+1); pj_strcpy(&rt_test_data[i].call_id, &rt_call_id); buf[0] = '0' + (char)i; pj_strcat(&rt_test_data[i].call_id, &str_id); /* Init mutex. */ status = pj_mutex_create_recursive(pool, "rt", &rt_test_data[i].mutex); if (status != PJ_SUCCESS) { app_perror(" error: unable to create mutex", status); return -615; } /* Create thread, suspended. 
*/ status = pj_thread_create(pool, "rttest%p", &rt_worker_thread, (void*)(long)i, 0, PJ_THREAD_SUSPENDED, &rt_test_data[i].thread); if (status != PJ_SUCCESS) { app_perror(" error: unable to create thread", status); return -620; } } /* Start threads! */ for (i=0; i<THREADS; ++i) { pj_time_val delay = {0,0}; pj_thread_resume(rt_test_data[i].thread); /* Schedule first message transmissions. */ rt_test_data[i].tx_timer.user_data = (void*)1; pjsip_endpt_schedule_timer(endpt, &rt_test_data[i].tx_timer, &delay); } /* Sleep for some time. */ pj_thread_sleep(INTERVAL * 1000); /* Signal thread to stop. */ rt_stop = PJ_TRUE; /* Wait threads to complete. */ for (i=0; i<THREADS; ++i) { pj_thread_join(rt_test_data[i].thread); pj_thread_destroy(rt_test_data[i].thread); } /* Destroy rt_test_data */ for (i=0; i<THREADS; ++i) { pj_mutex_destroy(rt_test_data[i].mutex); pjsip_endpt_cancel_timer(endpt, &rt_test_data[i].timeout_timer); } /* Gather statistics. */ pj_bzero(&total_time, sizeof(total_time)); pj_bzero(&zero_time, sizeof(zero_time)); usec_rt = total_sent = total_recv = 0; for (i=0; i<THREADS; ++i) { total_sent += rt_test_data[i].sent_request_count; total_recv += rt_test_data[i].recv_response_count; pj_add_timestamp(&total_time, &rt_test_data[i].total_rt_time); } /* Display statistics. */ if (total_recv) total_time.u64 = total_time.u64/total_recv; else total_time.u64 = 0; usec_rt = pj_elapsed_usec(&zero_time, &total_time); PJ_LOG(3,(THIS_FILE, " done.")); PJ_LOG(3,(THIS_FILE, " total %d messages sent", total_sent)); PJ_LOG(3,(THIS_FILE, " average round-trip=%d usec", usec_rt)); pjsip_endpt_release_pool(endpt, pool); *lost = total_sent-total_recv; /* Flush events. */ flush_events(500); /* Restore msg logger. */ msg_logger_set_enabled(logger_enabled); return 0; }
/* Test that we receive loopback message.
 *
 * Sends a single stateless OPTIONS request to target_url, then pumps
 * events until both the send callback and the receive callback have
 * reported a status (or a 2 second timeout expires). On success the
 * measured round-trip time in usec is stored in *p_usec_rtt.
 *
 * Returns PJ_SUCCESS or a negative test error code.
 */
int transport_send_recv_test( pjsip_transport_type_e tp_type,
                              pjsip_transport *ref_tp,
                              char *target_url,
                              int *p_usec_rtt)
{
    pj_bool_t msg_log_enabled;
    pj_status_t status;
    pj_str_t target, from, to, contact, call_id, body;
    pjsip_method method;
    pjsip_tx_data *tdata;
    pj_time_val timeout;

    PJ_UNUSED_ARG(tp_type);
    PJ_UNUSED_ARG(ref_tp);

    PJ_LOG(3,(THIS_FILE, " single message round-trip test..."));

    /* Register our test module to receive the message (if necessary). */
    if (my_module.id == -1) {
        status = pjsip_endpt_register_module( endpt, &my_module );
        if (status != PJ_SUCCESS) {
            app_perror(" error: unable to register module", status);
            return -500;
        }
    }

    /* Disable message logging. */
    msg_log_enabled = msg_logger_set_enabled(0);

    /* Create a request message. */
    target = pj_str(target_url);
    from = pj_str(FROM_HDR);
    to = pj_str(target_url);
    contact = pj_str(CONTACT_HDR);
    call_id = pj_str(CALL_ID_HDR);
    body = pj_str(BODY);

    pjsip_method_set(&method, PJSIP_OPTIONS_METHOD);
    status = pjsip_endpt_create_request( endpt, &method, &target, &from,
                                         &to, &contact, &call_id,
                                         CSEQ_VALUE, &body, &tdata );
    if (status != PJ_SUCCESS) {
        app_perror(" error: unable to create request", status);
        /* BUGFIX: route through on_return so message logging is restored
         * (previously this returned directly, leaving logging disabled). */
        status = -510;
        goto on_return;
    }

    /* Reset statuses */
    send_status = recv_status = NO_STATUS;

    /* Start time. */
    pj_get_timestamp(&my_send_time);

    /* Send the message (statelessly). */
    PJ_LOG(5,(THIS_FILE, "Sending request to %.*s",
              (int)target.slen, target.ptr));
    status = pjsip_endpt_send_request_stateless( endpt, tdata, NULL,
                                                 &send_msg_callback);
    if (status != PJ_SUCCESS) {
        /* Immediate error! The loop below will pick this up via
         * send_status and bail out with -550. */
        pjsip_tx_data_dec_ref(tdata);
        send_status = status;
    }

    /* Set the timeout (2 seconds from now) */
    pj_gettimeofday(&timeout);
    timeout.sec += 2;

    /* Loop handling events until we get status */
    do {
        pj_time_val now;
        pj_time_val poll_interval = { 0, 10 };

        pj_gettimeofday(&now);
        if (PJ_TIME_VAL_GTE(now, timeout)) {
            PJ_LOG(3,(THIS_FILE, " error: timeout in send/recv test"));
            status = -540;
            goto on_return;
        }

        if (send_status!=NO_STATUS && send_status!=PJ_SUCCESS) {
            app_perror(" error sending message", send_status);
            status = -550;
            goto on_return;
        }

        if (recv_status!=NO_STATUS && recv_status!=PJ_SUCCESS) {
            app_perror(" error receiving message", recv_status);
            status = -560;
            goto on_return;
        }

        if (send_status!=NO_STATUS && recv_status!=NO_STATUS) {
            /* Success! */
            break;
        }

        pjsip_endpt_handle_events(endpt, &poll_interval);

    } while (1);

    if (status == PJ_SUCCESS) {
        unsigned usec_rt;
        usec_rt = pj_elapsed_usec(&my_send_time, &my_recv_time);

        PJ_LOG(3,(THIS_FILE, " round-trip = %d usec", usec_rt));

        *p_usec_rtt = usec_rt;
    }

    status = PJ_SUCCESS;

on_return:
    /* BUGFIX: restore message logging on ALL exit paths; previously the
     * error gotos jumped past the restore, leaving logging disabled for
     * every subsequent test. */
    msg_logger_set_enabled(msg_log_enabled);
    return status;
}
/* Calculate the bandwidth for the specific test configuration.
 * The test is simple:
 *  - create sockpair_cnt number of producer-consumer socket pair.
 *  - create thread_cnt number of worker threads.
 *  - each producer will send buffer_size bytes data as fast and
 *    as soon as it can.
 *  - each consumer will read buffer_size bytes of data as fast
 *    as it could.
 *  - measure the total bytes received by all consumers during a
 *    period of time.
 *
 * Returns 0 on success with *p_bandwidth set to KB/s, or a negative
 * error code.
 */
static int perform_test(pj_bool_t allow_concur, int sock_type,
                        const char *type_name, unsigned thread_cnt,
                        unsigned sockpair_cnt, pj_size_t buffer_size,
                        pj_size_t *p_bandwidth)
{
    enum { MSEC_DURATION = 5000 };
    pj_pool_t *pool;
    test_item *items;
    pj_thread_t **thread;
    pj_ioqueue_t *ioqueue;
    pj_status_t rc;
    pj_ioqueue_callback ioqueue_callback;
    pj_uint32_t total_elapsed_usec, total_received;
    pj_highprec_t bandwidth;
    pj_timestamp start, stop;
    unsigned i;

    TRACE_((THIS_FILE, " starting test.."));

    /* Only read/write completions are used; clear the struct so the
     * ioqueue never sees indeterminate callback pointers. */
    pj_bzero(&ioqueue_callback, sizeof(ioqueue_callback));
    ioqueue_callback.on_read_complete = &on_read_complete;
    ioqueue_callback.on_write_complete = &on_write_complete;

    thread_quit_flag = 0;

    pool = pj_pool_create(mem, NULL, 4096, 4096, NULL);
    if (!pool)
        return -10;

    items = (test_item*) pj_pool_alloc(pool, sockpair_cnt*sizeof(test_item));
    thread = (pj_thread_t**)
             pj_pool_alloc(pool, thread_cnt*sizeof(pj_thread_t*));

    TRACE_((THIS_FILE, " creating ioqueue.."));
    rc = pj_ioqueue_create(pool, sockpair_cnt*2, &ioqueue);
    if (rc != PJ_SUCCESS) {
        app_perror("...error: unable to create ioqueue", rc);
        return -15;
    }

    rc = pj_ioqueue_set_default_concurrency(ioqueue, allow_concur);
    if (rc != PJ_SUCCESS) {
        app_perror("...error: pj_ioqueue_set_default_concurrency()", rc);
        return -16;
    }

    /* Initialize each producer-consumer pair. */
    for (i=0; i<sockpair_cnt; ++i) {
        pj_ssize_t bytes;

        items[i].ioqueue = ioqueue;
        items[i].buffer_size = buffer_size;
        items[i].outgoing_buffer = (char*) pj_pool_alloc(pool, buffer_size);
        items[i].incoming_buffer = (char*) pj_pool_alloc(pool, buffer_size);
        items[i].bytes_recv = items[i].bytes_sent = 0;

        /* randomize outgoing buffer. */
        pj_create_random_string(items[i].outgoing_buffer, buffer_size);

        /* Create socket pair. */
        TRACE_((THIS_FILE, " calling socketpair.."));
        rc = app_socketpair(pj_AF_INET(), sock_type, 0,
                            &items[i].server_fd, &items[i].client_fd);
        if (rc != PJ_SUCCESS) {
            app_perror("...error: unable to create socket pair", rc);
            return -20;
        }

        /* Register server socket to ioqueue. */
        TRACE_((THIS_FILE, " register(1).."));
        rc = pj_ioqueue_register_sock(pool, ioqueue,
                                      items[i].server_fd,
                                      &items[i], &ioqueue_callback,
                                      &items[i].server_key);
        if (rc != PJ_SUCCESS) {
            app_perror("...error: registering server socket to ioqueue", rc);
            return -60;
        }

        /* Register client socket to ioqueue. */
        TRACE_((THIS_FILE, " register(2).."));
        rc = pj_ioqueue_register_sock(pool, ioqueue,
                                      items[i].client_fd,
                                      &items[i], &ioqueue_callback,
                                      &items[i].client_key);
        if (rc != PJ_SUCCESS) {
            app_perror("...error: registering server socket to ioqueue", rc);
            return -70;
        }

        /* Start reading. */
        TRACE_((THIS_FILE, " pj_ioqueue_recv.."));
        bytes = items[i].buffer_size;
        rc = pj_ioqueue_recv(items[i].server_key, &items[i].recv_op,
                             items[i].incoming_buffer, &bytes, 0);
        if (rc != PJ_EPENDING) {
            app_perror("...error: pj_ioqueue_recv", rc);
            return -73;
        }

        /* Start writing. */
        TRACE_((THIS_FILE, " pj_ioqueue_write.."));
        bytes = items[i].buffer_size;
        rc = pj_ioqueue_send(items[i].client_key, &items[i].send_op,
                             items[i].outgoing_buffer, &bytes, 0);
        if (rc != PJ_SUCCESS && rc != PJ_EPENDING) {
            app_perror("...error: pj_ioqueue_write", rc);
            return -76;
        }

        items[i].has_pending_send = (rc==PJ_EPENDING);
    }

    /* Create the threads (suspended, so they all start together below). */
    for (i=0; i<thread_cnt; ++i) {
        struct thread_arg *arg;

        arg = (struct thread_arg*) pj_pool_zalloc(pool, sizeof(*arg));
        arg->id = i;
        arg->ioqueue = ioqueue;
        arg->counter = 0;

        rc = pj_thread_create( pool, NULL,
                               &worker_thread,
                               arg,
                               PJ_THREAD_DEFAULT_STACK_SIZE,
                               PJ_THREAD_SUSPENDED, &thread[i] );
        if (rc != PJ_SUCCESS) {
            app_perror("...error: unable to create thread", rc);
            return -80;
        }
    }

    /* Mark start time. */
    rc = pj_get_timestamp(&start);
    if (rc != PJ_SUCCESS)
        return -90;

    /* Start the thread. */
    TRACE_((THIS_FILE, " resuming all threads.."));
    for (i=0; i<thread_cnt; ++i) {
        rc = pj_thread_resume(thread[i]);
        if (rc != 0)
            return -100;
    }

    /* Wait for MSEC_DURATION seconds.
     * This should be as simple as pj_thread_sleep(MSEC_DURATION) actually,
     * but unfortunately it doesn't work when system doesn't employ
     * timeslicing for threads.
     */
    TRACE_((THIS_FILE, " wait for few seconds.."));
    do {
        pj_thread_sleep(1);

        /* Mark end time. */
        rc = pj_get_timestamp(&stop);

        if (thread_quit_flag) {
            TRACE_((THIS_FILE, " transfer limit reached.."));
            break;
        }

        /* BUGFIX: the comparison was inverted ('<'), which made the loop
         * exit on the very first iteration instead of after the intended
         * MSEC_DURATION msec of measurement. */
        if (pj_elapsed_usec(&start,&stop) > MSEC_DURATION * 1000) {
            TRACE_((THIS_FILE, " time limit reached.."));
            break;
        }
    } while (1);

    /* Terminate all threads. */
    TRACE_((THIS_FILE, " terminating all threads.."));
    thread_quit_flag = 1;

    for (i=0; i<thread_cnt; ++i) {
        TRACE_((THIS_FILE, " join thread %d..", i));
        pj_thread_join(thread[i]);
    }

    /* Close all sockets. */
    TRACE_((THIS_FILE, " closing all sockets.."));
    for (i=0; i<sockpair_cnt; ++i) {
        pj_ioqueue_unregister(items[i].server_key);
        pj_ioqueue_unregister(items[i].client_key);
    }

    /* Destroy threads */
    for (i=0; i<thread_cnt; ++i) {
        pj_thread_destroy(thread[i]);
    }

    /* Destroy ioqueue. */
    TRACE_((THIS_FILE, " destroying ioqueue.."));
    pj_ioqueue_destroy(ioqueue);

    /* Calculate actual time in usec. */
    total_elapsed_usec = pj_elapsed_usec(&start, &stop);

    /* Calculate total bytes received.
     * BUGFIX: accumulate with '+='; the original plain assignment kept
     * only the last socket pair's counter. */
    total_received = 0;
    for (i=0; i<sockpair_cnt; ++i) {
        total_received += (pj_uint32_t)items[i].bytes_recv;
    }

    /* bandwidth = total_received*1000/total_elapsed_usec */
    bandwidth = total_received;
    pj_highprec_mul(bandwidth, 1000);
    pj_highprec_div(bandwidth, total_elapsed_usec);

    *p_bandwidth = (pj_uint32_t)bandwidth;

    PJ_LOG(3,(THIS_FILE, " %.4s %2d %2d %8d KB/s",
              type_name, thread_cnt, sockpair_cnt,
              *p_bandwidth));

    /* Done. */
    pj_pool_release(pool);

    TRACE_((THIS_FILE, " done.."));
    return 0;
}
/*
 * Benchmarking IOQueue
 *
 * Measures the round-trip latency of one UDP datagram through the ioqueue
 * while inactive_sock_count extra sockets (each with a pending read) are
 * registered, to see how the poll cost scales with the number of handles.
 *
 * Returns 0 on success (latency is logged), negative on failure.
 */
static int bench_test(pj_bool_t allow_concur, int bufsize,
                      int inactive_sock_count)
{
    pj_sock_t ssock=-1, csock=-1;
    pj_sockaddr_in addr;
    pj_pool_t *pool = NULL;
    pj_sock_t *inactive_sock=NULL;
    pj_ioqueue_op_key_t *inactive_read_op;
    char *send_buf, *recv_buf;
    pj_ioqueue_t *ioque = NULL;
    pj_ioqueue_key_t *skey, *ckey, *keys[SOCK_INACTIVE_MAX+2];
    pj_timestamp t1, t2, t_elapsed;
    int rc=0, i;    /* i must be signed */
    pj_str_t temp;
    char errbuf[PJ_ERR_MSG_SIZE];

    TRACE__((THIS_FILE, " bench test %d", inactive_sock_count));

    // Create pool.
    pool = pj_pool_create(mem, NULL, POOL_SIZE, 4000, NULL);

    // Allocate buffers for send and receive.
    send_buf = (char*)pj_pool_alloc(pool, bufsize);
    recv_buf = (char*)pj_pool_alloc(pool, bufsize);

    // Allocate sockets for sending and receiving.
    rc = pj_sock_socket(pj_AF_INET(), pj_SOCK_DGRAM(), 0, &ssock);
    if (rc == PJ_SUCCESS) {
        rc = pj_sock_socket(pj_AF_INET(), pj_SOCK_DGRAM(), 0, &csock);
    } else
        csock = PJ_INVALID_SOCKET;
    if (rc != PJ_SUCCESS) {
        app_perror("...error: pj_sock_socket()", rc);
        goto on_error;
    }

    // Bind server socket.
    pj_bzero(&addr, sizeof(addr));
    addr.sin_family = pj_AF_INET();
    addr.sin_port = pj_htons(PORT);
    if (pj_sock_bind(ssock, &addr, sizeof(addr)))
        goto on_error;

    pj_assert(inactive_sock_count+2 <= PJ_IOQUEUE_MAX_HANDLES);

    // Create I/O Queue.
    rc = pj_ioqueue_create(pool, PJ_IOQUEUE_MAX_HANDLES, &ioque);
    if (rc != PJ_SUCCESS) {
        app_perror("...error: pj_ioqueue_create()", rc);
        goto on_error;
    }

    // Set concurrency
    rc = pj_ioqueue_set_default_concurrency(ioque, allow_concur);
    if (rc != PJ_SUCCESS) {
        app_perror("...error: pj_ioqueue_set_default_concurrency()", rc);
        goto on_error;
    }

    // Allocate inactive sockets, and bind them to some arbitrary address.
    // Then register them to the I/O queue, and start a read operation.
    inactive_sock = (pj_sock_t*)pj_pool_alloc(pool,
                                    inactive_sock_count*sizeof(pj_sock_t));
    inactive_read_op = (pj_ioqueue_op_key_t*)pj_pool_alloc(pool,
                              inactive_sock_count*sizeof(pj_ioqueue_op_key_t));
    pj_bzero(&addr, sizeof(addr));
    addr.sin_family = pj_AF_INET();
    for (i=0; i<inactive_sock_count; ++i) {
        pj_ssize_t bytes;

        rc = pj_sock_socket(pj_AF_INET(), pj_SOCK_DGRAM(), 0,
                            &inactive_sock[i]);
        if (rc != PJ_SUCCESS || inactive_sock[i] < 0) {
            app_perror("...error: pj_sock_socket()", rc);
            goto on_error;
        }
        if ((rc=pj_sock_bind(inactive_sock[i], &addr, sizeof(addr))) != 0) {
            pj_sock_close(inactive_sock[i]);
            inactive_sock[i] = PJ_INVALID_SOCKET;
            app_perror("...error: pj_sock_bind()", rc);
            goto on_error;
        }
        rc = pj_ioqueue_register_sock(pool, ioque, inactive_sock[i],
                                      NULL, &test_cb, &keys[i]);
        if (rc != PJ_SUCCESS) {
            pj_sock_close(inactive_sock[i]);
            inactive_sock[i] = PJ_INVALID_SOCKET;
            app_perror("...error(1): pj_ioqueue_register_sock()", rc);
            PJ_LOG(3,(THIS_FILE, "....i=%d", i));
            goto on_error;
        }
        /* NOTE: all inactive reads deliberately share recv_buf; the data
         * is never expected to arrive on these sockets. */
        bytes = bufsize;
        rc = pj_ioqueue_recv(keys[i], &inactive_read_op[i], recv_buf,
                             &bytes, 0);
        if (rc != PJ_EPENDING) {
            pj_sock_close(inactive_sock[i]);
            inactive_sock[i] = PJ_INVALID_SOCKET;
            app_perror("...error: pj_ioqueue_read()", rc);
            goto on_error;
        }
    }

    // Register server and client socket.
    // We put this after inactivity socket, hopefully this can represent the
    // worst waiting time.
    rc = pj_ioqueue_register_sock(pool, ioque, ssock, NULL,
                                  &test_cb, &skey);
    if (rc != PJ_SUCCESS) {
        app_perror("...error(2): pj_ioqueue_register_sock()", rc);
        goto on_error;
    }

    rc = pj_ioqueue_register_sock(pool, ioque, csock, NULL,
                                  &test_cb, &ckey);
    if (rc != PJ_SUCCESS) {
        app_perror("...error(3): pj_ioqueue_register_sock()", rc);
        goto on_error;
    }

    // Set destination address to send the packet.
    pj_sockaddr_in_init(&addr, pj_cstr(&temp, "127.0.0.1"), PORT);

    // Test loop.
    t_elapsed.u64 = 0;
    for (i=0; i<LOOP; ++i) {
        pj_ssize_t bytes;
        pj_ioqueue_op_key_t read_op, write_op;

        // Randomize send buffer.
        pj_create_random_string(send_buf, bufsize);

        // Start reading on the server side.
        bytes = bufsize;
        rc = pj_ioqueue_recv(skey, &read_op, recv_buf, &bytes, 0);
        if (rc != PJ_EPENDING) {
            app_perror("...error: pj_ioqueue_read()", rc);
            break;
        }

        // Starts send on the client side.
        bytes = bufsize;
        rc = pj_ioqueue_sendto(ckey, &write_op, send_buf, &bytes, 0,
                               &addr, sizeof(addr));
        if (rc != PJ_SUCCESS && rc != PJ_EPENDING) {
            app_perror("...error: pj_ioqueue_write()", rc);
            break;
        }
        if (rc == PJ_SUCCESS) {
            if (bytes < 0) {
                app_perror("...error: pj_ioqueue_sendto()",
                           (pj_status_t)-bytes);
                break;
            }
        }

        // Begin time.
        pj_get_timestamp(&t1);

        // Poll the queue until we've got completion event in the server side.
        callback_read_key = NULL;
        callback_read_size = 0;
        TRACE__((THIS_FILE, " waiting for key = %p", skey));
        do {
            pj_time_val timeout = { 1, 0 };
#ifdef PJ_SYMBIAN
            rc = pj_symbianos_poll(-1, PJ_TIME_VAL_MSEC(timeout));
#else
            rc = pj_ioqueue_poll(ioque, &timeout);
#endif
            TRACE__((THIS_FILE, " poll rc=%d", rc));
        } while (rc >= 0 && callback_read_key != skey);

        // End time.
        pj_get_timestamp(&t2);
        t_elapsed.u64 += (t2.u64 - t1.u64);

        if (rc < 0) {
            app_perror(" error: pj_ioqueue_poll", -rc);
            break;
        }

        // Compare recv buffer with send buffer.
        if (callback_read_size != bufsize ||
            pj_memcmp(send_buf, recv_buf, bufsize))
        {
            rc = -10;
            PJ_LOG(3,(THIS_FILE, " error: size/buffer mismatch"));
            break;
        }

        // Poll until all events are exhausted, before we start the next loop.
        do {
            pj_time_val timeout = { 0, 10 };
#ifdef PJ_SYMBIAN
            PJ_UNUSED_ARG(timeout);
            rc = pj_symbianos_poll(-1, 100);
#else
            rc = pj_ioqueue_poll(ioque, &timeout);
#endif
        } while (rc>0);

        rc = 0;
    }

    // Print results
    if (rc == 0) {
        pj_timestamp tzero;
        pj_uint32_t usec_delay;

        tzero.u32.hi = tzero.u32.lo = 0;
        usec_delay = pj_elapsed_usec( &tzero, &t_elapsed);

        PJ_LOG(3, (THIS_FILE, "...%10d %15d % 9d", bufsize,
                   inactive_sock_count, usec_delay));
    } else {
        PJ_LOG(2, (THIS_FILE, "...ERROR rc=%d (buf:%d, fds:%d)",
                   rc, bufsize, inactive_sock_count+2));
    }

    // Cleaning up.
    for (i=inactive_sock_count-1; i>=0; --i) {
        pj_ioqueue_unregister(keys[i]);
    }
    pj_ioqueue_unregister(skey);
    pj_ioqueue_unregister(ckey);

    pj_ioqueue_destroy(ioque);
    pj_pool_release( pool);
    return rc;

on_error:
    PJ_LOG(1,(THIS_FILE, "...ERROR: %s",
              pj_strerror(pj_get_netos_error(), errbuf, sizeof(errbuf))));
    /* BUGFIX: compare against PJ_INVALID_SOCKET instead of truth-testing;
     * the original 'if (ssock)' was true for the -1 initializer and closed
     * an invalid descriptor (and would have skipped a valid fd 0). */
    if (ssock != PJ_INVALID_SOCKET)
        pj_sock_close(ssock);
    if (csock != PJ_INVALID_SOCKET)
        pj_sock_close(csock);
    for (i=0; i<inactive_sock_count && inactive_sock &&
              inactive_sock[i]!=PJ_INVALID_SOCKET; ++i)
    {
        pj_sock_close(inactive_sock[i]);
    }
    if (ioque != NULL)
        pj_ioqueue_destroy(ioque);
    pj_pool_release( pool);
    return -1;
}
/*
 * pj_ioqueue_poll() - epoll backend poll loop.
 *
 * Waits up to the given timeout (9000 msec when timeout is NULL) on the
 * ioqueue's epoll fd, then, while holding the ioqueue lock, translates
 * each epoll event into a READABLE/WRITEABLE/EXCEPTION entry in a local
 * queue[] array. Events are queued only for keys that have a matching
 * pending operation and are not closing; EPOLLOUT is also treated as
 * writeable for an in-progress connect(), and EPOLLERR is reported as an
 * exception while connecting, otherwise surfaced as a read event so the
 * higher layers discover the error. Group-lock references are added before
 * the ioqueue lock is released, and callbacks are dispatched outside the
 * lock; per-key counters/references are dropped after each dispatch.
 *
 * Returns the number of events processed, 0 on timeout (after scanning the
 * closing-keys list when PJ_IOQUEUE_HAS_SAFE_UNREG is enabled), or the
 * negated OS error on epoll failure.
 *
 * NOTE(review): when epoll reports activity but nothing matches a pending
 * operation (count > 0 yet processed == 0), the code sleeps for the full
 * remaining msec to avoid busy-spinning on a spurious wakeup -- see the
 * "Special case" comment below.
 */
PJ_DEF(int) pj_ioqueue_poll( pj_ioqueue_t *ioqueue, const pj_time_val *timeout) { int i, count, processed; int msec; //struct epoll_event *events = ioqueue->events; //struct queue *queue = ioqueue->queue; struct epoll_event events[PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL]; struct queue queue[PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL]; pj_timestamp t1, t2; PJ_CHECK_STACK(); msec = timeout ? PJ_TIME_VAL_MSEC(*timeout) : 9000; TRACE_((THIS_FILE, "start os_epoll_wait, msec=%d", msec)); pj_get_timestamp(&t1); //count = os_epoll_wait( ioqueue->epfd, events, ioqueue->max, msec); count = os_epoll_wait( ioqueue->epfd, events, PJ_IOQUEUE_MAX_EVENTS_IN_SINGLE_POLL, msec); if (count == 0) { #if PJ_IOQUEUE_HAS_SAFE_UNREG /* Check the closing keys only when there's no activity and when there are * pending closing keys. */ if (count == 0 && !pj_list_empty(&ioqueue->closing_list)) { pj_lock_acquire(ioqueue->lock); scan_closing_keys(ioqueue); pj_lock_release(ioqueue->lock); } #endif TRACE_((THIS_FILE, "os_epoll_wait timed out")); return count; } else if (count < 0) { TRACE_((THIS_FILE, "os_epoll_wait error")); return -pj_get_netos_error(); } pj_get_timestamp(&t2); TRACE_((THIS_FILE, "os_epoll_wait returns %d, time=%d usec", count, pj_elapsed_usec(&t1, &t2))); /* Lock ioqueue. */ pj_lock_acquire(ioqueue->lock); for (processed=0, i=0; i<count; ++i) { pj_ioqueue_key_t *h = (pj_ioqueue_key_t*)(epoll_data_type) events[i].epoll_data; TRACE_((THIS_FILE, "event %d: events=%d", i, events[i].events)); /* * Check readability. */ if ((events[i].events & EPOLLIN) && (key_has_pending_read(h) || key_has_pending_accept(h)) && !IS_CLOSING(h) ) { #if PJ_IOQUEUE_HAS_SAFE_UNREG increment_counter(h); #endif queue[processed].key = h; queue[processed].event_type = READABLE_EVENT; ++processed; continue; } /* * Check for writeability. 
*/ if ((events[i].events & EPOLLOUT) && key_has_pending_write(h) && !IS_CLOSING(h)) { #if PJ_IOQUEUE_HAS_SAFE_UNREG increment_counter(h); #endif queue[processed].key = h; queue[processed].event_type = WRITEABLE_EVENT; ++processed; continue; } #if PJ_HAS_TCP /* * Check for completion of connect() operation. */ if ((events[i].events & EPOLLOUT) && (h->connecting) && !IS_CLOSING(h)) { #if PJ_IOQUEUE_HAS_SAFE_UNREG increment_counter(h); #endif queue[processed].key = h; queue[processed].event_type = WRITEABLE_EVENT; ++processed; continue; } #endif /* PJ_HAS_TCP */ /* * Check for error condition. */ if ((events[i].events & EPOLLERR) && !IS_CLOSING(h)) { /* * We need to handle this exception event. If it's related to us * connecting, report it as such. If not, just report it as a * read event and the higher layers will handle it. */ if (h->connecting) { #if PJ_IOQUEUE_HAS_SAFE_UNREG increment_counter(h); #endif queue[processed].key = h; queue[processed].event_type = EXCEPTION_EVENT; ++processed; } else if (key_has_pending_read(h) || key_has_pending_accept(h)) { #if PJ_IOQUEUE_HAS_SAFE_UNREG increment_counter(h); #endif queue[processed].key = h; queue[processed].event_type = READABLE_EVENT; ++processed; } continue; } } for (i=0; i<processed; ++i) { if (queue[i].key->grp_lock) pj_grp_lock_add_ref_dbg(queue[i].key->grp_lock, "ioqueue", 0); } PJ_RACE_ME(5); pj_lock_release(ioqueue->lock); PJ_RACE_ME(5); /* Now process the events. 
*/ for (i=0; i<processed; ++i) { switch (queue[i].event_type) { case READABLE_EVENT: ioqueue_dispatch_read_event(ioqueue, queue[i].key); break; case WRITEABLE_EVENT: ioqueue_dispatch_write_event(ioqueue, queue[i].key); break; case EXCEPTION_EVENT: ioqueue_dispatch_exception_event(ioqueue, queue[i].key); break; case NO_EVENT: pj_assert(!"Invalid event!"); break; } #if PJ_IOQUEUE_HAS_SAFE_UNREG decrement_counter(queue[i].key); #endif if (queue[i].key->grp_lock) pj_grp_lock_dec_ref_dbg(queue[i].key->grp_lock, "ioqueue", 0); } /* Special case: * When epoll returns > 0 but no descriptors are actually set! */ if (count > 0 && !processed && msec > 0) { pj_thread_sleep(msec); } pj_get_timestamp(&t1); TRACE_((THIS_FILE, "ioqueue_poll() returns %d, time=%d usec", processed, pj_elapsed_usec(&t2, &t1))); return processed; }
int timestamp_test(void) { enum { CONSECUTIVE_LOOP = 100 }; volatile unsigned i; pj_timestamp freq, t1, t2; pj_time_val tv1, tv2; unsigned elapsed; pj_status_t rc; PJ_LOG(3,(THIS_FILE, "...Testing timestamp (high res time)")); /* Get and display timestamp frequency. */ if ((rc=pj_get_timestamp_freq(&freq)) != PJ_SUCCESS) { app_perror("...ERROR: get timestamp freq", rc); return -1000; } PJ_LOG(3,(THIS_FILE, "....frequency: hiword=%lu loword=%lu", freq.u32.hi, freq.u32.lo)); PJ_LOG(3,(THIS_FILE, "...checking if time can run backwards (pls wait)..")); /* * Check if consecutive readings should yield timestamp value * that is bigger than previous value. * First we get the first timestamp. */ rc = pj_get_timestamp(&t1); if (rc != PJ_SUCCESS) { app_perror("...ERROR: pj_get_timestamp", rc); return -1001; } rc = pj_gettimeofday(&tv1); if (rc != PJ_SUCCESS) { app_perror("...ERROR: pj_gettimeofday", rc); return -1002; } for (i=0; i<CONSECUTIVE_LOOP; ++i) { pj_thread_sleep(pj_rand() % 100); rc = pj_get_timestamp(&t2); if (rc != PJ_SUCCESS) { app_perror("...ERROR: pj_get_timestamp", rc); return -1003; } rc = pj_gettimeofday(&tv2); if (rc != PJ_SUCCESS) { app_perror("...ERROR: pj_gettimeofday", rc); return -1004; } /* compare t2 with t1, expecting t2 >= t1. */ if (t2.u32.hi < t1.u32.hi || (t2.u32.hi == t1.u32.hi && t2.u32.lo < t1.u32.lo)) { PJ_LOG(3,(THIS_FILE, "...ERROR: timestamp run backwards!")); return -1005; } /* compare tv2 with tv1, expecting tv2 >= tv1. */ if (PJ_TIME_VAL_LT(tv2, tv1)) { PJ_LOG(3,(THIS_FILE, "...ERROR: time run backwards!")); return -1006; } } /* * Simple test to time some loop. */ PJ_LOG(3,(THIS_FILE, "....testing simple 1000000 loop")); /* Mark start time. */ if ((rc=pj_get_timestamp(&t1)) != PJ_SUCCESS) { app_perror("....error: cat't get timestamp", rc); return -1010; } /* Loop.. */ for (i=0; i<1000000; ++i) { /* Try to do something so that smart compilers wont * remove this silly loop. */ null_func(); } pj_thread_sleep(0); /* Mark end time. 
*/ pj_get_timestamp(&t2); /* Get elapsed time in usec. */ elapsed = pj_elapsed_usec(&t1, &t2); PJ_LOG(3,(THIS_FILE, "....elapsed: %u usec", (unsigned)elapsed)); /* See if elapsed time is "reasonable". * This should be good even on 50Mhz embedded powerpc. */ if (elapsed < 1 || elapsed > 1000000) { PJ_LOG(3,(THIS_FILE, "....error: elapsed time outside window (%u, " "t1.u32.hi=%u, t1.u32.lo=%u, " "t2.u32.hi=%u, t2.u32.lo=%u)", elapsed, t1.u32.hi, t1.u32.lo, t2.u32.hi, t2.u32.lo)); return -1030; } /* Testing time/timestamp accuracy */ rc = timestamp_accuracy(); if (rc != 0) return rc; return 0; }
static int uri_benchmark(unsigned *p_parse, unsigned *p_print, unsigned *p_cmp) { unsigned i, loop; pj_status_t status = PJ_SUCCESS; pj_timestamp zero; pj_time_val elapsed; pj_highprec_t avg_parse, avg_print, avg_cmp, kbytes; pj_bzero(&var, sizeof(var)); zero.u32.hi = zero.u32.lo = 0; var.parse_len = var.print_len = var.cmp_len = 0; var.parse_time.u32.hi = var.parse_time.u32.lo = 0; var.print_time.u32.hi = var.print_time.u32.lo = 0; var.cmp_time.u32.hi = var.cmp_time.u32.lo = 0; for (loop=0; loop<LOOP_COUNT; ++loop) { for (i=0; i<PJ_ARRAY_SIZE(uri_test_array); ++i) { pj_pool_t *pool; pool = pjsip_endpt_create_pool(endpt, "", POOL_SIZE, POOL_SIZE); status = do_uri_test(pool, &uri_test_array[i]); pjsip_endpt_release_pool(endpt, pool); if (status != PJ_SUCCESS) { PJ_LOG(3,(THIS_FILE, " error %d when testing entry %d", status, i)); pjsip_endpt_release_pool(endpt, pool); goto on_return; } } } kbytes = var.parse_len; pj_highprec_mod(kbytes, 1000000); pj_highprec_div(kbytes, 100000); elapsed = pj_elapsed_time(&zero, &var.parse_time); avg_parse = pj_elapsed_usec(&zero, &var.parse_time); pj_highprec_mul(avg_parse, AVERAGE_URL_LEN); pj_highprec_div(avg_parse, var.parse_len); if (avg_parse == 0) avg_parse = 1; avg_parse = 1000000 / avg_parse; PJ_LOG(3,(THIS_FILE, " %u.%u MB of urls parsed in %d.%03ds (avg=%d urls/sec)", (unsigned)(var.parse_len/1000000), (unsigned)kbytes, elapsed.sec, elapsed.msec, (unsigned)avg_parse)); *p_parse = (unsigned)avg_parse; kbytes = var.print_len; pj_highprec_mod(kbytes, 1000000); pj_highprec_div(kbytes, 100000); elapsed = pj_elapsed_time(&zero, &var.print_time); avg_print = pj_elapsed_usec(&zero, &var.print_time); pj_highprec_mul(avg_print, AVERAGE_URL_LEN); pj_highprec_div(avg_print, var.parse_len); if (avg_print == 0) avg_print = 1; avg_print = 1000000 / avg_print; PJ_LOG(3,(THIS_FILE, " %u.%u MB of urls printed in %d.%03ds (avg=%d urls/sec)", (unsigned)(var.print_len/1000000), (unsigned)kbytes, elapsed.sec, elapsed.msec, (unsigned)avg_print)); 
*p_print = (unsigned)avg_print; kbytes = var.cmp_len; pj_highprec_mod(kbytes, 1000000); pj_highprec_div(kbytes, 100000); elapsed = pj_elapsed_time(&zero, &var.cmp_time); avg_cmp = pj_elapsed_usec(&zero, &var.cmp_time); pj_highprec_mul(avg_cmp, AVERAGE_URL_LEN); pj_highprec_div(avg_cmp, var.cmp_len); if (avg_cmp == 0) avg_cmp = 1; avg_cmp = 1000000 / avg_cmp; PJ_LOG(3,(THIS_FILE, " %u.%u MB of urls compared in %d.%03ds (avg=%d urls/sec)", (unsigned)(var.cmp_len/1000000), (unsigned)kbytes, elapsed.sec, elapsed.msec, (unsigned)avg_cmp)); *p_cmp = (unsigned)avg_cmp; on_return: return status; }
/* * sock_producer_consumer() * * Simple producer-consumer benchmarking. Send loop number of * buf_size size packets as fast as possible. */ static int sock_producer_consumer(int sock_type, unsigned buf_size, unsigned loop, unsigned *p_bandwidth) { pj_sock_t consumer, producer; pj_pool_t *pool; char *outgoing_buffer, *incoming_buffer; pj_timestamp start, stop; unsigned i; pj_highprec_t elapsed, bandwidth; pj_size_t total_received; pj_status_t rc; /* Create pool. */ pool = pj_pool_create(mem, NULL, 4096, 4096, NULL); if (!pool) return -10; /* Create producer-consumer pair. */ rc = app_socketpair(PJ_AF_INET, sock_type, 0, &consumer, &producer); if (rc != PJ_SUCCESS) { app_perror("...error: create socket pair", rc); return -20; } /* Create buffers. */ outgoing_buffer = pj_pool_alloc(pool, buf_size); incoming_buffer = pj_pool_alloc(pool, buf_size); /* Start loop. */ pj_get_timestamp(&start); total_received = 0; for (i=0; i<loop; ++i) { pj_ssize_t sent, part_received, received; pj_time_val delay; sent = buf_size; rc = pj_sock_send(producer, outgoing_buffer, &sent, 0); if (rc != PJ_SUCCESS || sent != (pj_ssize_t)buf_size) { app_perror("...error: send()", rc); return -61; } /* Repeat recv() until all data is part_received. * This applies only for non-UDP of course, since for UDP * we would expect all data to be part_received in one packet. 
*/ received = 0; do { part_received = buf_size-received; rc = pj_sock_recv(consumer, incoming_buffer+received, &part_received, 0); if (rc != PJ_SUCCESS) { app_perror("...recv error", rc); return -70; } if (part_received <= 0) { PJ_LOG(3,("", "...error: socket has closed (part_received=%d)!", part_received)); return -73; } if ((pj_size_t)part_received != buf_size-received) { if (sock_type != PJ_SOCK_STREAM) { PJ_LOG(3,("", "...error: expecting %u bytes, got %u bytes", buf_size-received, part_received)); return -76; } } received += part_received; } while ((pj_size_t)received < buf_size); total_received += received; /* Stop test if it's been runnign for more than 10 secs. */ pj_get_timestamp(&stop); delay = pj_elapsed_time(&start, &stop); if (delay.sec > 10) break; } /* Stop timer. */ pj_get_timestamp(&stop); elapsed = pj_elapsed_usec(&start, &stop); /* bandwidth = total_received * 1000 / elapsed */ bandwidth = total_received; pj_highprec_mul(bandwidth, 1000); pj_highprec_div(bandwidth, elapsed); *p_bandwidth = (pj_uint32_t)bandwidth; /* Close sockets. */ pj_sock_close(consumer); pj_sock_close(producer); /* Done */ pj_pool_release(pool); return 0; }
/*
 * WSOLA expansion / lost-frame concealment test.
 *
 * Reads 16-bit PCM frames from filein and writes processed audio to
 * fileout. When lost_rate10 is zero, frames are expanded: after each
 * saved frame, with probability expansion_rate100 percent an extra
 * frame is synthesized with pjmedia_wsola_generate(). When lost_rate10
 * is nonzero, frames are randomly "lost" with probability lost_rate10
 * out of 10, and each loss conceals a burst of lost_burst frames.
 * Processing time is accumulated and the throughput is logged.
 *
 * Returns 0 on success, 1 on error (file open or WSOLA creation
 * failure); all opened streams are closed on every path.
 */
int expand(pj_pool_t *pool, const char *filein, const char *fileout,
           int expansion_rate100, int lost_rate10, int lost_burst)
{
    FILE *in, *out;
    short frame[SAMPLES_PER_FRAME];
    pjmedia_wsola *wsola;
    pj_timestamp elapsed, zero;
    unsigned samples;
    int last_lost = 0;
    pj_status_t status;

    /* Lost burst must be > 0 */
    assert(lost_rate10==0 || lost_burst > 0);

    in = fopen(filein, "rb");
    if (!in)
        return 1;
    out = fopen(fileout, "wb");
    if (!out) {
        fclose(in);     /* was leaked in the original */
        return 1;
    }

    /* Check WSOLA creation (the original ignored the status). */
    status = pjmedia_wsola_create(pool, CLOCK_RATE, SAMPLES_PER_FRAME, 1,
                                  0, &wsola);
    if (status != PJ_SUCCESS) {
        fclose(in);
        fclose(out);
        return 1;
    }

    samples = 0;
    elapsed.u64 = 0;

    while (fread(frame, SAMPLES_PER_FRAME*2, 1, in) == 1) {
        pj_timestamp t1, t2;

        if (lost_rate10 == 0) {

            /* Expansion */
            pj_get_timestamp(&t1);
            pjmedia_wsola_save(wsola, frame, 0);
            pj_get_timestamp(&t2);

            pj_sub_timestamp(&t2, &t1);
            pj_add_timestamp(&elapsed, &t2);

            fwrite(frame, SAMPLES_PER_FRAME*2, 1, out);
            samples += SAMPLES_PER_FRAME;

            if ((rand() % 100) < expansion_rate100) {
                pj_get_timestamp(&t1);
                pjmedia_wsola_generate(wsola, frame);
                pj_get_timestamp(&t2);

                pj_sub_timestamp(&t2, &t1);
                pj_add_timestamp(&elapsed, &t2);

                samples += SAMPLES_PER_FRAME;

                fwrite(frame, SAMPLES_PER_FRAME*2, 1, out);
            }

        } else {

            /* Lost */
            if ((rand() % 10) < lost_rate10) {
                int burst;

                /* Conceal a burst of lost frames. */
                for (burst=0; burst<lost_burst; ++burst) {
                    pj_get_timestamp(&t1);
                    pjmedia_wsola_generate(wsola, frame);
                    pj_get_timestamp(&t2);

                    pj_sub_timestamp(&t2, &t1);
                    pj_add_timestamp(&elapsed, &t2);

                    samples += SAMPLES_PER_FRAME;

                    fwrite(frame, SAMPLES_PER_FRAME*2, 1, out);
                }
                last_lost = 1;
            } else {
                pj_get_timestamp(&t1);
                pjmedia_wsola_save(wsola, frame, last_lost);
                pj_get_timestamp(&t2);

                pj_sub_timestamp(&t2, &t1);
                pj_add_timestamp(&elapsed, &t2);

                samples += SAMPLES_PER_FRAME;

                fwrite(frame, SAMPLES_PER_FRAME*2, 1, out);
                last_lost = 0;
            }

        }
    }

    zero.u64 = 0;
    zero.u64 = pj_elapsed_usec(&zero, &elapsed);
    /* Avoid division by zero if the timer granularity is too coarse. */
    if (zero.u64 == 0)
        zero.u64 = 1;
    zero.u64 = samples * PJ_INT64(1000000) / zero.u64;

    assert(zero.u32.hi == 0);

    PJ_LOG(3,("test.c", "Processing: %f Msamples per second",
              zero.u32.lo/1000000.0));
    PJ_LOG(3,("test.c", "CPU load for current settings: %f%%",
              CLOCK_RATE * 100.0 / zero.u32.lo));

    pjmedia_wsola_destroy(wsola);
    fclose(in);
    fclose(out);

    return 0;
}
/*
 * WSOLA compression (discard) test.
 *
 * Reads buffers of BUF_CNT 16-bit PCM samples from filein, and for
 * each buffer discards rate10 frames' worth of samples using
 * pjmedia_wsola_discard() (exercising one of several buffer split
 * strategies selected at compile time), writing the shortened buffer
 * to fileout. Processing time is accumulated and throughput logged.
 *
 * Returns 0 on success, 1 on error (file open or WSOLA creation
 * failure); all opened streams are closed on every path.
 */
int compress(pj_pool_t *pool,
             const char *filein, const char *fileout, int rate10)
{
    enum { BUF_CNT = SAMPLES_PER_FRAME * 10 };
    FILE *in, *out;
    pjmedia_wsola *wsola;
    short buf[BUF_CNT];
    pj_timestamp elapsed, zero;
    unsigned samples = 0;
    pj_status_t status;

    in = fopen(filein, "rb");
    if (!in)
        return 1;
    out = fopen(fileout, "wb");
    if (!out) {
        fclose(in);     /* was leaked in the original */
        return 1;
    }

    /* Check WSOLA creation (the original ignored the status). */
    status = pjmedia_wsola_create(pool, CLOCK_RATE, SAMPLES_PER_FRAME, 1,
                                  0, &wsola);
    if (status != PJ_SUCCESS) {
        fclose(in);
        fclose(out);
        return 1;
    }

    elapsed.u64 = 0;

    for (;;) {
        unsigned size_del, count;
        pj_timestamp t1, t2;
        int i;

        if (fread(buf, sizeof(buf), 1, in) != 1)
            break;

        count = BUF_CNT;
        size_del = 0;
        pj_get_timestamp(&t1);

        for (i=0; i<rate10; ++i) {
            unsigned to_del = SAMPLES_PER_FRAME;
#if 0
            /* Method 1: buf1 contiguous */
            pjmedia_wsola_discard(wsola, buf, count, NULL, 0, &to_del);
#elif 0
            /* Method 2: split, majority in buf1 */
            assert(count > SAMPLES_PER_FRAME);
            pjmedia_wsola_discard(wsola, buf, count-SAMPLES_PER_FRAME,
                                  buf+count-SAMPLES_PER_FRAME,
                                  SAMPLES_PER_FRAME, &to_del);
#elif 0
            /* Method 3: split, majority in buf2 */
            assert(count > SAMPLES_PER_FRAME);
            pjmedia_wsola_discard(wsola, buf, SAMPLES_PER_FRAME,
                                  buf+SAMPLES_PER_FRAME,
                                  count-SAMPLES_PER_FRAME, &to_del);
#elif 1
            /* Method 4: split, each with small length */
            enum { TOT_LEN = 3 * SAMPLES_PER_FRAME };
            unsigned buf1_len = (rand() % TOT_LEN);
            short *ptr = buf + count - TOT_LEN;
            assert(count > TOT_LEN);
            if (buf1_len==0)
                buf1_len=SAMPLES_PER_FRAME*2;
            pjmedia_wsola_discard(wsola, ptr, buf1_len,
                                  ptr+buf1_len, TOT_LEN-buf1_len,
                                  &to_del);
#endif
            count -= to_del;
            size_del += to_del;
        }
        pj_get_timestamp(&t2);

        samples += BUF_CNT;

        pj_sub_timestamp(&t2, &t1);
        pj_add_timestamp(&elapsed, &t2);

        assert(size_del >= SAMPLES_PER_FRAME);

        fwrite(buf, count, 2, out);
    }

    pjmedia_wsola_destroy(wsola);
    fclose(in);
    fclose(out);

    zero.u64 = 0;
    zero.u64 = pj_elapsed_usec(&zero, &elapsed);
    /* Avoid division by zero if the timer granularity is too coarse. */
    if (zero.u64 == 0)
        zero.u64 = 1;
    zero.u64 = samples * PJ_INT64(1000000) / zero.u64;

    assert(zero.u32.hi == 0);

    PJ_LOG(3,("test.c", "Processing: %f Msamples per second",
              zero.u32.lo/1000000.0));
    PJ_LOG(3,("test.c", "CPU load for current settings: %f%%",
              CLOCK_RATE * 100.0 / zero.u32.lo));

    return 0;
}
pj_uint32_t to_usec() const
{
    // Elapsed time in microseconds, measured against an
    // all-bits-clear (zero) timestamp baseline.
    Pj_Timestamp epoch;
    pj_memset(&epoch, 0, sizeof(epoch));
    return pj_elapsed_usec(&epoch.ts_, &ts_);
}