/*! \brief Callback function for reading from a Snoop channel */ static struct ast_frame *snoop_read(struct ast_channel *chan) { struct stasis_app_snoop *snoop = ast_channel_tech_pvt(chan); struct ast_frame *frame = NULL; /* If we fail to ack the timer OR if any active audiohooks are done hangup */ if ((ast_timer_ack(snoop->timer, 1) < 0) || (snoop->spy_active && snoop->spy.status != AST_AUDIOHOOK_STATUS_RUNNING) || (snoop->whisper_active && snoop->whisper.status != AST_AUDIOHOOK_STATUS_RUNNING)) { return NULL; } /* Only get audio from the spy audiohook if it is active */ if (snoop->spy_active) { ast_audiohook_lock(&snoop->spy); frame = ast_audiohook_read_frame(&snoop->spy, snoop->spy_samples, snoop->spy_direction, snoop->spy_format); ast_audiohook_unlock(&snoop->spy); } return frame ? frame : &ast_null_frame; }
/*! \brief Callback function for reading from a Snoop channel */ static struct ast_frame *snoop_read(struct ast_channel *chan) { struct stasis_app_snoop *snoop = ast_channel_tech_pvt(chan); struct ast_frame *frame = NULL; /* If we fail to ack the timer OR if any active audiohooks are done hangup */ if ((ast_timer_ack(snoop->timer, 1) < 0) || (snoop->spy_active && snoop->spy.status != AST_AUDIOHOOK_STATUS_RUNNING) || (snoop->whisper_active && snoop->whisper.status != AST_AUDIOHOOK_STATUS_RUNNING)) { return NULL; } /* Only get audio from the spy audiohook if it is active */ if (!snoop->spy_active) { return &ast_null_frame; } ast_audiohook_lock(&snoop->spy); if (snoop->spy_direction != AST_AUDIOHOOK_DIRECTION_BOTH) { /* * When a singular direction is chosen frames are still written to the * opposing direction's queue. Those frames must be read so the queue * does not continue to grow, however since they are not needed for the * selected direction they can be dropped. */ enum ast_audiohook_direction opposing_direction = snoop->spy_direction == AST_AUDIOHOOK_DIRECTION_READ ? AST_AUDIOHOOK_DIRECTION_WRITE : AST_AUDIOHOOK_DIRECTION_READ; ast_frame_dtor(ast_audiohook_read_frame(&snoop->spy, snoop->spy_samples, opposing_direction, snoop->spy_format)); } frame = ast_audiohook_read_frame(&snoop->spy, snoop->spy_samples, snoop->spy_direction, snoop->spy_format); ast_audiohook_unlock(&snoop->spy); return frame ? frame : &ast_null_frame; }
static char *timing_test(struct ast_cli_entry *e, int cmd, struct ast_cli_args *a) { struct ast_timer *timer; int count = 0; struct timeval start, end; unsigned int test_rate = 50; switch (cmd) { case CLI_INIT: e->command = "timing test"; e->usage = "Usage: timing test <rate>\n" " Test a timer with a specified rate, 50/sec by default.\n" ""; return NULL; case CLI_GENERATE: return NULL; } if (a->argc != 2 && a->argc != 3) { return CLI_SHOWUSAGE; } if (a->argc == 3) { unsigned int rate; if (sscanf(a->argv[2], "%30u", &rate) == 1) { test_rate = rate; } else { ast_cli(a->fd, "Invalid rate '%s', using default of %u\n", a->argv[2], test_rate); } } ast_cli(a->fd, "Attempting to test a timer with %u ticks per second.\n", test_rate); if (!(timer = ast_timer_open())) { ast_cli(a->fd, "Failed to open timing fd\n"); return CLI_FAILURE; } ast_cli(a->fd, "Using the '%s' timing module for this test.\n", timer->holder->iface->name); start = ast_tvnow(); ast_timer_set_rate(timer, test_rate); while (ast_tvdiff_ms((end = ast_tvnow()), start) < 1000) { int res; struct pollfd pfd = { .fd = ast_timer_fd(timer), .events = POLLIN | POLLPRI, }; res = ast_poll(&pfd, 1, 100); if (res == 1) { count++; if (ast_timer_ack(timer, 1) < 0) { ast_cli(a->fd, "Timer failed to acknowledge.\n"); ast_timer_close(timer); return CLI_FAILURE; } } else if (!res) { ast_cli(a->fd, "poll() timed out! This is bad.\n"); } else if (errno != EAGAIN && errno != EINTR) { ast_cli(a->fd, "poll() returned error: %s\n", strerror(errno)); } } ast_timer_close(timer); timer = NULL; ast_cli(a->fd, "It has been %" PRIi64 " milliseconds, and we got %d timer ticks\n", ast_tvdiff_ms(end, start), count); return CLI_SUCCESS; } static struct ast_cli_entry cli_timing[] = { AST_CLI_DEFINE(timing_test, "Run a timing test"), }; static void timing_shutdown(void) { ast_cli_unregister_multiple(cli_timing, ARRAY_LEN(cli_timing)); ast_heap_destroy(timing_interfaces); timing_interfaces = NULL; }
/*!
 * \brief Framehook callback implementing the channel jitterbuffer.
 *
 * Intercepts frames read from the channel: voice frames (with timing info)
 * are placed into the jitterbuffer, and NULL frames (generated by the
 * jitterbuffer timer via AST_JITTERBUFFER_FD) are used as opportunities to
 * pull buffered/interpolated audio back out.  Control frames that imply a
 * source change force a resync.
 *
 * \param chan  Channel the framehook is attached to.
 * \param frame Frame being read (may be NULL).
 * \param event Framehook event; only AST_FRAMEHOOK_EVENT_READ is processed.
 * \param data  The jb_framedata state for this channel.
 *
 * \return The frame to deliver to the channel core (possibly
 *         \c ast_null_frame, a buffered frame, or an interpolated frame).
 */
static struct ast_frame *hook_event_cb(struct ast_channel *chan, struct ast_frame *frame, enum ast_framehook_event event, void *data)
{
	struct jb_framedata *framedata = data;
	struct timeval now_tv;
	unsigned long now;
	int putframe = 0; /* signifies if audio frame was placed into the buffer or not */

	/* Only the READ path goes through the jitterbuffer; everything else
	 * passes through untouched. */
	switch (event) {
	case AST_FRAMEHOOK_EVENT_READ:
		break;
	case AST_FRAMEHOOK_EVENT_ATTACHED:
	case AST_FRAMEHOOK_EVENT_DETACHED:
	case AST_FRAMEHOOK_EVENT_WRITE:
		return frame;
	}

	/* If this read was triggered by our jitterbuffer timer, acknowledge the
	 * tick before doing anything else. */
	if (ast_channel_fdno(chan) == AST_JITTERBUFFER_FD && framedata->timer) {
		if (ast_timer_ack(framedata->timer, 1) < 0) {
			ast_log(LOG_ERROR, "Failed to acknowledge timer in jitter buffer\n");
			return frame;
		}
	}

	if (!frame) {
		return frame;
	}

	/* All jitterbuffer bookkeeping is done in ms relative to start_tv. */
	now_tv = ast_tvnow();
	now = ast_tvdiff_ms(now_tv, framedata->start_tv);

	if (frame->frametype == AST_FRAME_VOICE) {
		int res;
		struct ast_frame *jbframe;

		if (!ast_test_flag(frame, AST_FRFLAG_HAS_TIMING_INFO) || frame->len < 2 || frame->ts < 0) {
			/* only frames with timing info can enter the jitterbuffer */
			return frame;
		}

		/* Make the frame safe to hold onto beyond this callback.
		 * NOTE(review): ast_frisolate's result is not checked here; if it can
		 * return NULL on allocation failure, NULL would be handed to the
		 * jb put functions below — confirm against the frame API. */
		jbframe = ast_frisolate(frame);
		ao2_replace(framedata->last_format, frame->subclass.format);

		/* Keep the timer rate in sync with the incoming frame duration. */
		if (frame->len && (frame->len != framedata->timer_interval)) {
			framedata->timer_interval = frame->len;
			ast_timer_set_rate(framedata->timer, 1000 / framedata->timer_interval);
		}
		if (!framedata->first) {
			framedata->first = 1;
			res = framedata->jb_impl->put_first(framedata->jb_obj, jbframe, now);
		} else {
			res = framedata->jb_impl->put(framedata->jb_obj, jbframe, now);
		}

		/* On success the buffer owns jbframe; free the original if it was
		 * copied.  On failure free our copy instead (the original is still
		 * returned below). */
		if (res == AST_JB_IMPL_OK) {
			if (jbframe != frame) {
				ast_frfree(frame);
			}
			frame = &ast_null_frame;
		} else if (jbframe != frame) {
			ast_frfree(jbframe);
		}
		putframe = 1;
	}

	if (frame->frametype == AST_FRAME_NULL) {
		int res;
		long next = framedata->jb_impl->next(framedata->jb_obj);

		/* If now is earlier than the next expected output frame
		 * from the jitterbuffer we may choose to pass on retrieving
		 * a frame during this read iteration.  The only exception
		 * to this rule is when an audio frame is placed into the buffer
		 * and the time for the next frame to come out of the buffer is
		 * at least within the timer_interval of the next output frame. By
		 * doing this we are able to feed off the timing of the input frames
		 * and only rely on our jitterbuffer timer when frames are dropped.
		 * During testing, this hybrid form of timing gave more reliable results. */
		if (now < next) {
			long int diff = next - now;
			if (!putframe) {
				return frame;
			} else if (diff >= framedata->timer_interval) {
				return frame;
			}
		}

		ast_frfree(frame);
		frame = &ast_null_frame;

		res = framedata->jb_impl->get(framedata->jb_obj, &frame, now, framedata->timer_interval);
		switch (res) {
		case AST_JB_IMPL_OK:
			/* got it, and pass it through */
			break;
		case AST_JB_IMPL_DROP:
			ast_frfree(frame);
			frame = &ast_null_frame;
			break;
		case AST_JB_IMPL_INTERP:
			/* The buffer wants us to synthesize a frame; build an empty
			 * voice frame in the last known format for interpolation. */
			if (framedata->last_format) {
				struct ast_frame tmp = { 0, };

				tmp.frametype = AST_FRAME_VOICE;
				tmp.subclass.format = framedata->last_format;
				/* example: 8000hz / (1000 / 20ms) = 160 samples */
				tmp.samples = ast_format_get_sample_rate(framedata->last_format) / (1000 / framedata->timer_interval);
				tmp.delivery = ast_tvadd(framedata->start_tv, ast_samp2tv(next, 1000));
				tmp.offset = AST_FRIENDLY_OFFSET;
				tmp.src = "func_jitterbuffer interpolation";
				ast_frfree(frame);
				frame = ast_frdup(&tmp);
				break;
			}
			/* else fall through */
		case AST_JB_IMPL_NOFRAME:
			ast_frfree(frame);
			frame = &ast_null_frame;
			break;
		}
	}

	/* Source/hold changes invalidate the buffer's timing assumptions. */
	if (frame->frametype == AST_FRAME_CONTROL) {
		switch(frame->subclass.integer) {
		case AST_CONTROL_HOLD:
		case AST_CONTROL_UNHOLD:
		case AST_CONTROL_T38_PARAMETERS:
		case AST_CONTROL_SRCUPDATE:
		case AST_CONTROL_SRCCHANGE:
			framedata->jb_impl->force_resync(framedata->jb_obj);
			break;
		default:
			break;
		}
	}

	return frame;
}