Example #1
/*!
 * \brief Create a new OGG/Speex filestream and set it up for reading.
 * \param fs Filestream that points to the on-disk storage of the OGG/Speex data.
 * \return 0 on success, -1 on error.
 */
static int ogg_speex_open(struct ast_filestream *fs)
{
	char *buffer;
	size_t bytes;
	struct speex_desc *s = (struct speex_desc *)fs->_private;
	SpeexHeader *hdr = NULL;
	int i, result, expected_rate;

	expected_rate = ast_format_get_sample_rate(fs->fmt->format);
	s->serialno = -1;
	ogg_sync_init(&s->oy);

	buffer = ogg_sync_buffer(&s->oy, BLOCK_SIZE);
	bytes = fread(buffer, 1, BLOCK_SIZE, fs->f);
	ogg_sync_wrote(&s->oy, bytes);

	result = ogg_sync_pageout(&s->oy, &s->og);
	if (result != 1) {
		if (bytes < BLOCK_SIZE) {
			ast_log(LOG_ERROR, "Ran out of data...\n");
		} else {
			ast_log(LOG_ERROR, "Input does not appear to be an Ogg bitstream.\n");
		}
		ogg_sync_clear(&s->oy);
		return -1;
	}

	ogg_stream_init(&s->os, ogg_page_serialno(&s->og));
	if (ogg_stream_pagein(&s->os, &s->og) < 0) {
		ast_log(LOG_ERROR, "Error reading first page of Ogg bitstream data.\n");
		goto error;
	}

	if (read_packet(fs) < 0) {
		ast_log(LOG_ERROR, "Error reading initial header packet.\n");
		goto error;
	}

	hdr = speex_packet_to_header((char *)s->op.packet, s->op.bytes);
	if (!hdr) {
		ast_log(LOG_ERROR, "Failed to parse the initial Speex header.\n");
		goto error;
	}
	if (memcmp(hdr->speex_string, "Speex   ", 8)) {
		ast_log(LOG_ERROR, "OGG container does not contain Speex audio!\n");
		goto error;
	}
	if (hdr->frames_per_packet != 1) {
		ast_log(LOG_ERROR, "Only one frame-per-packet OGG/Speex files are currently supported!\n");
		goto error;
	}
	if (hdr->nb_channels != 1) {
		ast_log(LOG_ERROR, "Only monophonic OGG/Speex files are currently supported!\n");
		goto error;
	}
	if (hdr->rate != expected_rate) {
		ast_log(LOG_ERROR, "Unexpected sampling rate (%d != %d)!\n",
			hdr->rate, expected_rate);
		goto error;
	}

	/* The next packet is the comment header; read and discard it */
	if (read_packet(fs) < 0) {
		ast_log(LOG_ERROR, "Error reading comment packet.\n");
		goto error;
	}
	for (i = 0; i < hdr->extra_headers; i++) {
		if (read_packet(fs) < 0) {
			ast_log(LOG_ERROR, "Error reading extra header packet %d.\n", i+1);
			goto error;
		}
	}
	speex_header_free(hdr);

	return 0;
error:
	if (hdr) {
		speex_header_free(hdr);
	}
	ogg_stream_clear(&s->os);
	ogg_sync_clear(&s->oy);
	return -1;
}
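
For context, an open callback like ogg_speex_open is normally handed to the core through a file format definition. The following is a minimal sketch only, assuming the conventional ast_format_def/ast_format_def_register pattern used by filestream modules; the field values shown are illustrative, not taken from this excerpt.

static struct ast_format_def speex_def = {
	.name = "ogg_speex",                      /* format name */
	.exts = "spx",                            /* recognized file extension(s) */
	.open = ogg_speex_open,                   /* invoked once the file is opened */
	.desc_size = sizeof(struct speex_desc),   /* private data, fs->_private above */
};

/* In the module's load routine: */
if (ast_format_def_register(&speex_def)) {
	return AST_MODULE_LOAD_DECLINE;
}

Example #2
/*! \brief Compare two SILK formats; they are equal when their sample rates match. */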
static enum ast_format_cmp_res silk_cmp(const struct ast_format *format1, const struct ast_format *format2)
{
	if (ast_format_get_sample_rate(format1) == ast_format_get_sample_rate(format2)) {
		return AST_FORMAT_CMP_EQUAL;
	}

	return AST_FORMAT_CMP_NOT_EQUAL;
}
Example #3
/*! \brief Helper function which gets the format for a Snoop channel based on the channel being snooped on */
static void snoop_determine_format(struct ast_channel *chan, struct stasis_app_snoop *snoop)
{
	SCOPED_CHANNELLOCK(lock, chan);
	unsigned int rate = MAX(ast_format_get_sample_rate(ast_channel_rawwriteformat(chan)),
		ast_format_get_sample_rate(ast_channel_rawreadformat(chan)));

	snoop->spy_format = ast_format_cache_get_slin_by_rate(rate);
}
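
To make the MAX() choice concrete, a hypothetical case (the values are illustrative): if the snooped channel writes 16kHz audio but reads 8kHz audio, the helper keeps the higher rate so neither direction is downsampled.

/* Sketch: MAX(8000, 16000) yields 16000, so the spy format becomes
 * the cached 16kHz signed linear format. */
unsigned int rate = MAX(8000, 16000);
struct ast_format *spy_format = ast_format_cache_get_slin_by_rate(rate);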
Example #4
static int speex_callback(struct ast_audiohook *audiohook, struct ast_channel *chan, struct ast_frame *frame, enum ast_audiohook_direction direction)
{
	struct ast_datastore *datastore = NULL;
	struct speex_direction_info *sdi = NULL;
	struct speex_info *si = NULL;
	char source[80];

	/* If the audiohook is stopping, the channel is shutting down; we let the datastore destroy callback take care of cleanup */
	if (audiohook->status == AST_AUDIOHOOK_STATUS_DONE || frame->frametype != AST_FRAME_VOICE) {
		return -1;
	}

	/* We are called with chan already locked */
	if (!(datastore = ast_channel_datastore_find(chan, &speex_datastore, NULL))) {
		return -1;
	}

	si = datastore->data;

	sdi = (direction == AST_AUDIOHOOK_DIRECTION_READ) ? si->rx : si->tx;

	if (!sdi) {
		return -1;
	}

	if ((sdi->samples != frame->samples) || (ast_format_get_sample_rate(frame->subclass.format) != si->lastrate)) {
		si->lastrate = ast_format_get_sample_rate(frame->subclass.format);
		if (sdi->state) {
			speex_preprocess_state_destroy(sdi->state);
		}

		if (!(sdi->state = speex_preprocess_state_init((sdi->samples = frame->samples), si->lastrate))) {
			return -1;
		}

		speex_preprocess_ctl(sdi->state, SPEEX_PREPROCESS_SET_AGC, &sdi->agc);

		if (sdi->agc) {
			speex_preprocess_ctl(sdi->state, SPEEX_PREPROCESS_SET_AGC_LEVEL, &sdi->agclevel);
		}

		speex_preprocess_ctl(sdi->state, SPEEX_PREPROCESS_SET_DENOISE, &sdi->denoise);
	}

	speex_preprocess(sdi->state, frame->data.ptr, NULL);
	snprintf(source, sizeof(source), "%s/speex", frame->src);
	if (frame->mallocd & AST_MALLOCD_SRC) {
		ast_free((char *) frame->src);
	}
	frame->src = ast_strdup(source);
	frame->mallocd |= AST_MALLOCD_SRC;

	return 0;
}
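
A manipulate callback such as speex_callback only runs after an audiohook is attached to the channel. Below is a hedged wiring sketch that mirrors the TALK_DETECT example later in this section; the si->audiohook field and the surrounding setup are assumptions, not code from this excerpt.

/* Sketch: register speex_callback as a manipulate audiohook. */
ast_audiohook_init(&si->audiohook, AST_AUDIOHOOK_TYPE_MANIPULATE,
	"speex", AST_AUDIOHOOK_MANIPULATE_ALL_RATES);
si->audiohook.manipulate_callback = speex_callback;
ast_audiohook_attach(chan, &si->audiohook);

Example #5
/*! \brief Compute the joint format of two SILK formats with matching sample rates. */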
static struct ast_format *silk_getjoint(const struct ast_format *format1, const struct ast_format *format2)
{
	struct silk_attr *attr1 = ast_format_get_attribute_data(format1);
	struct silk_attr *attr2 = ast_format_get_attribute_data(format2);
	struct ast_format *jointformat;
	struct silk_attr *attr_res;

	if (ast_format_get_sample_rate(format1) != ast_format_get_sample_rate(format2)) {
		return NULL;
	}

	jointformat = ast_format_clone(format1);
	if (!jointformat) {
		return NULL;
	}
	attr_res = ast_format_get_attribute_data(jointformat);

	if (!attr1 || !attr2) {
		attr_init(attr_res);
	} else {
		/* Take the lowest max bitrate */
		attr_res->maxbitrate = MIN(attr1->maxbitrate, attr2->maxbitrate);

		/* Only do dtx if both sides want it. DTX is a trade off between
		 * computational complexity and bandwidth. */
		attr_res->dtx = attr1->dtx && attr2->dtx ? 1 : 0;

		/* Only do FEC if both sides want it.  If a peer specifically requests not
		 * to receive with FEC, it may be a waste of bandwidth. */
		attr_res->fec = attr1->fec && attr2->fec ? 1 : 0;

		/* Use the maximum packetloss percentage between the two attributes. This affects how
		 * much redundancy is used in the FEC. */
		attr_res->packetloss_percentage = MAX(attr1->packetloss_percentage, attr2->packetloss_percentage);
	}

	return jointformat;
}
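
Negotiation helpers like silk_cmp and silk_getjoint are typically exposed through a format attribute interface. A minimal sketch, assuming the standard ast_format_interface registration used by res_format_attr modules (only the two callbacks shown above are filled in):

static struct ast_format_interface silk_interface = {
	.format_cmp = silk_cmp,
	.format_get_joint = silk_getjoint,
};

/* In the module's load routine: */
if (ast_format_interface_register("silk", &silk_interface)) {
	return AST_MODULE_LOAD_DECLINE;
}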
Example #6
/*! \internal \brief Enable talk detection on the channel */
static int set_talk_detect(struct ast_channel *chan, int dsp_silence_threshold, int dsp_talking_threshold)
{
	struct ast_datastore *datastore = NULL;
	struct talk_detect_params *td_params;
	SCOPED_CHANNELLOCK(chan_lock, chan);

	datastore = ast_channel_datastore_find(chan, &talk_detect_datastore, NULL);
	if (!datastore) {
		datastore = ast_datastore_alloc(&talk_detect_datastore, NULL);
		if (!datastore) {
			return -1;
		}

		td_params = ast_calloc(1, sizeof(*td_params));
		if (!td_params) {
			ast_datastore_free(datastore);
			return -1;
		}

		ast_audiohook_init(&td_params->audiohook,
		                   AST_AUDIOHOOK_TYPE_MANIPULATE,
		                   "TALK_DETECT",
		                   AST_AUDIOHOOK_MANIPULATE_ALL_RATES);
		td_params->audiohook.manipulate_callback = talk_detect_audiohook_cb;
		ast_set_flag(&td_params->audiohook, AST_AUDIOHOOK_TRIGGER_READ);

		td_params->dsp = ast_dsp_new_with_rate(ast_format_get_sample_rate(ast_channel_rawreadformat(chan)));
		if (!td_params->dsp) {
			ast_datastore_free(datastore);
			ast_free(td_params);
			return -1;
		}
		datastore->data = td_params;

		ast_channel_datastore_add(chan, datastore);
		ast_audiohook_attach(chan, &td_params->audiohook);
	} else {
		/* Talk detection already enabled; update existing settings */
		td_params = datastore->data;
	}

	td_params->dsp_talking_threshold = dsp_talking_threshold;
	td_params->dsp_silence_threshold = dsp_silence_threshold;

	ast_dsp_set_threshold(td_params->dsp, td_params->dsp_talking_threshold);

	return 0;
}
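
A short usage sketch for the helper above (the threshold values are illustrative, not the module defaults):

/* Enable talk detection with a 2500ms silence threshold and a
 * DSP talking threshold of 256; bail out if the hook cannot attach. */
if (set_talk_detect(chan, 2500, 256)) {
	ast_log(LOG_WARNING, "Could not enable talk detection on %s\n",
		ast_channel_name(chan));
	return -1;
}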
Example #7
static int mp3_exec(struct ast_channel *chan, const char *data)
{
	int res = 0;
	int fds[2];
	int ms = -1;
	int pid = -1;
	RAII_VAR(struct ast_format *, owriteformat, NULL, ao2_cleanup);
	int timeout = 2000;
	struct timeval next;
	struct ast_frame *f;
	struct myframe {
		struct ast_frame f;
		char offset[AST_FRIENDLY_OFFSET];
		short frdata[160];
	} myf = {
		.f = { 0, },
	};
	struct ast_format *native_format;
	unsigned int sampling_rate;
	struct ast_format *write_format;

	if (ast_strlen_zero(data)) {
		ast_log(LOG_WARNING, "MP3 Playback requires an argument (filename)\n");
		return -1;
	}

	if (pipe(fds)) {
		ast_log(LOG_WARNING, "Unable to create pipe\n");
		return -1;
	}
	
	ast_stopstream(chan);

	native_format = ast_format_cap_get_format(ast_channel_nativeformats(chan), 0);
	sampling_rate = ast_format_get_sample_rate(native_format);
	write_format = ast_format_cache_get_slin_by_rate(sampling_rate);

	owriteformat = ao2_bump(ast_channel_writeformat(chan));
	res = ast_set_write_format(chan, write_format);
	if (res < 0) {
		ast_log(LOG_WARNING, "Unable to set write format to signed linear\n");
		return -1;
	}

	myf.f.frametype = AST_FRAME_VOICE;
	myf.f.subclass.format = write_format;
	myf.f.mallocd = 0;
	myf.f.offset = AST_FRIENDLY_OFFSET;
	myf.f.src = __PRETTY_FUNCTION__;
	myf.f.delivery.tv_sec = 0;
	myf.f.delivery.tv_usec = 0;
	myf.f.data.ptr = myf.frdata;
	
	res = mp3play(data, sampling_rate, fds[1]);
	if (!strncasecmp(data, "http://", 7)) {
		timeout = 10000;
	}
	/* Wait 1000 ms first */
	next = ast_tvnow();
	next.tv_sec += 1;
	if (res >= 0) {
		pid = res;
		/* Order is important -- there is almost always going to be MP3 data, so
		   we want to prioritize the user's input */
		for (;;) {
			ms = ast_tvdiff_ms(next, ast_tvnow());
			if (ms <= 0) {
				res = timed_read(fds[0], myf.frdata, sizeof(myf.frdata), timeout);
				if (res > 0) {
					myf.f.datalen = res;
					myf.f.samples = res / 2;
					if (ast_write(chan, &myf.f) < 0) {
						res = -1;
						break;
					}
				} else {
					ast_debug(1, "No more mp3\n");
					res = 0;
					break;
				}
				next = ast_tvadd(next, ast_samp2tv(myf.f.samples, sampling_rate));
			} else {
				ms = ast_waitfor(chan, ms);
				if (ms < 0) {
					ast_debug(1, "Hangup detected\n");
					res = -1;
					break;
				}
				if (ms) {
					f = ast_read(chan);
					if (!f) {
						ast_debug(1, "Null frame == hangup() detected\n");
						res = -1;
						break;
					}
					if (f->frametype == AST_FRAME_DTMF) {
						ast_debug(1, "User pressed a key\n");
						ast_frfree(f);
						res = 0;
						break;
					}
					ast_frfree(f);
				} 
			}
		}
	}
	close(fds[0]);
	close(fds[1]);
	
	if (pid > -1) {
		kill(pid, SIGKILL);
	}
	if (!res && owriteformat) {
		ast_set_write_format(chan, owriteformat);
	}

	ast_frfree(&myf.f);
	
	return res;
}
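
mp3_exec relies on a timed_read() helper that is not shown in this excerpt. A plausible reconstruction, assuming a poll-then-read pattern on the decoder pipe (sketch only, not the module's verbatim helper):

static int timed_read(int fd, void *data, int datalen, int timeout)
{
	int res;
	struct pollfd fds[1];

	fds[0].fd = fd;
	fds[0].events = POLLIN;
	res = ast_poll(fds, 1, timeout);
	if (res < 1) {
		ast_log(LOG_NOTICE, "Poll timed out/errored out with %d\n", res);
		return -1;
	}

	return read(fd, data, datalen);
}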
Example #8
struct ast_channel *stasis_app_control_snoop(struct ast_channel *chan,
	enum stasis_app_snoop_direction spy, enum stasis_app_snoop_direction whisper,
	const char *app, const char *app_args, const char *snoop_id)
{
	RAII_VAR(struct stasis_app_snoop *, snoop, NULL, ao2_cleanup);
	struct ast_format_cap *caps;
	pthread_t thread;
	struct ast_assigned_ids assignedids = {
		.uniqueid = snoop_id,
	};

	if (spy == STASIS_SNOOP_DIRECTION_NONE &&
		whisper == STASIS_SNOOP_DIRECTION_NONE) {
		return NULL;
	}

	snoop = ao2_alloc_options(sizeof(*snoop), snoop_destroy, AO2_ALLOC_OPT_LOCK_NOLOCK);
	if (!snoop) {
		return NULL;
	}

	/* Allocate a buffer to store the Stasis application and arguments in */
	snoop->app = ast_str_create(64);
	if (!snoop->app) {
		return NULL;
	}

	ast_str_set(&snoop->app, 0, "%s", app);
	if (!ast_strlen_zero(app_args)) {
		ast_str_append(&snoop->app, 0, ",%s", app_args);
	}

	/* Set up a timer for the Snoop channel so it wakes up at a specific interval */
	snoop->timer = ast_timer_open();
	if (!snoop->timer) {
		return NULL;
	}
	ast_timer_set_rate(snoop->timer, 1000 / SNOOP_INTERVAL);

	/* Determine which signed linear format should be used */
	snoop_determine_format(chan, snoop);

	/* Allocate a Snoop channel and set up various parameters */
	snoop->chan = ast_channel_alloc(1, AST_STATE_UP, "", "", "", "", "", &assignedids, NULL, 0, "Snoop/%s-%08x", ast_channel_uniqueid(chan),
		(unsigned)ast_atomic_fetchadd_int((int *)&chan_idx, +1));
	if (!snoop->chan) {
		return NULL;
	}

	ast_copy_string(snoop->uniqueid, ast_channel_uniqueid(chan), sizeof(snoop->uniqueid));

	/* To keep the channel valid on the Snoop structure until it is destroyed, we bump the ref up here */
	ast_channel_ref(snoop->chan);

	ast_channel_tech_set(snoop->chan, &snoop_tech);
	ao2_ref(snoop, +1);
	ast_channel_tech_pvt_set(snoop->chan, snoop);
	ast_channel_set_fd(snoop->chan, 0, ast_timer_fd(snoop->timer));

	/* The format on the Snoop channel will be this signed linear format, and it will never change */
	caps = ast_format_cap_alloc(AST_FORMAT_CAP_FLAG_DEFAULT);
	if (!caps) {
		ast_channel_unlock(snoop->chan);
		ast_hangup(snoop->chan);
		return NULL;
	}
	ast_format_cap_append(caps, snoop->spy_format, 0);
	ast_channel_nativeformats_set(snoop->chan, caps);
	ao2_ref(caps, -1);

	ast_channel_set_writeformat(snoop->chan, snoop->spy_format);
	ast_channel_set_rawwriteformat(snoop->chan, snoop->spy_format);
	ast_channel_set_readformat(snoop->chan, snoop->spy_format);
	ast_channel_set_rawreadformat(snoop->chan, snoop->spy_format);

	ast_channel_unlock(snoop->chan);

	if (spy != STASIS_SNOOP_DIRECTION_NONE) {
		if (snoop_setup_audiohook(chan, AST_AUDIOHOOK_TYPE_SPY, spy, &snoop->spy_direction, &snoop->spy)) {
			ast_hangup(snoop->chan);
			return NULL;
		}

		snoop->spy_samples = ast_format_get_sample_rate(snoop->spy_format) / (1000 / SNOOP_INTERVAL);
		snoop->spy_active = 1;
	}

	/* If whispering is enabled set up the audiohook */
	if (whisper != STASIS_SNOOP_DIRECTION_NONE) {
		if (snoop_setup_audiohook(chan, AST_AUDIOHOOK_TYPE_WHISPER, whisper, &snoop->whisper_direction, &snoop->whisper)) {
			ast_hangup(snoop->chan);
			return NULL;
		}

		snoop->whisper_active = 1;
	}

	/* Create the thread which services the Snoop channel */
	ao2_ref(snoop, +1);
	if (ast_pthread_create_detached_background(&thread, NULL, snoop_stasis_thread, snoop)) {
		ao2_cleanup(snoop);

		/* No other thread is servicing this channel so we can immediately hang it up */
		ast_hangup(snoop->chan);
		return NULL;
	}

	publish_chanspy_message(snoop, 1);

	/* The caller of this has a reference as well */
	return ast_channel_ref(snoop->chan);
}
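
A hypothetical caller sketch (the BOTH direction value is assumed from the same enum as the STASIS_SNOOP_DIRECTION_NONE constant used above): spy on both directions of a target channel, with no whisper, delivering audio to a Stasis application named "myapp".

struct ast_channel *snoop_chan = stasis_app_control_snoop(target,
	STASIS_SNOOP_DIRECTION_BOTH, STASIS_SNOOP_DIRECTION_NONE,
	"myapp", NULL, NULL);
if (!snoop_chan) {
	ast_log(LOG_WARNING, "Failed to create Snoop channel\n");
}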
Example #9
static struct ast_frame *hook_event_cb(struct ast_channel *chan, struct ast_frame *frame, enum ast_framehook_event event, void *data)
{
	struct jb_framedata *framedata = data;
	struct timeval now_tv;
	unsigned long now;
	int putframe = 0; /* signifies whether an audio frame was placed into the buffer */

	switch (event) {
	case AST_FRAMEHOOK_EVENT_READ:
		break;
	case AST_FRAMEHOOK_EVENT_ATTACHED:
	case AST_FRAMEHOOK_EVENT_DETACHED:
	case AST_FRAMEHOOK_EVENT_WRITE:
		return frame;
	}

	if (ast_channel_fdno(chan) == AST_JITTERBUFFER_FD && framedata->timer) {
		if (ast_timer_ack(framedata->timer, 1) < 0) {
			ast_log(LOG_ERROR, "Failed to acknowledge timer in jitter buffer\n");
			return frame;
		}
	}

	if (!frame) {
		return frame;
	}

	now_tv = ast_tvnow();
	now = ast_tvdiff_ms(now_tv, framedata->start_tv);

	if (frame->frametype == AST_FRAME_VOICE) {
		int res;
		struct ast_frame *jbframe;

		if (!ast_test_flag(frame, AST_FRFLAG_HAS_TIMING_INFO) || frame->len < 2 || frame->ts < 0) {
			/* only frames with timing info can enter the jitterbuffer */
			return frame;
		}

		jbframe = ast_frisolate(frame);
		if (!jbframe) {
			/* Unable to isolate the frame, so pass the original through */
			return frame;
		}
		ao2_replace(framedata->last_format, frame->subclass.format);

		if (frame->len && (frame->len != framedata->timer_interval)) {
			framedata->timer_interval = frame->len;
			ast_timer_set_rate(framedata->timer, 1000 / framedata->timer_interval);
		}
		if (!framedata->first) {
			framedata->first = 1;
			res = framedata->jb_impl->put_first(framedata->jb_obj, jbframe, now);
		} else {
			res = framedata->jb_impl->put(framedata->jb_obj, jbframe, now);
		}

		if (res == AST_JB_IMPL_OK) {
			if (jbframe != frame) {
				ast_frfree(frame);
			}
			frame = &ast_null_frame;
		} else if (jbframe != frame) {
			ast_frfree(jbframe);
		}
		putframe = 1;
	}

	if (frame->frametype == AST_FRAME_NULL) {
		int res;
		long next = framedata->jb_impl->next(framedata->jb_obj);

		/* If now is earlier than the next expected output frame
		 * from the jitterbuffer we may choose to pass on retrieving
		 * a frame during this read iteration.  The only exception
		 * to this rule is when an audio frame is placed into the buffer
		 * and the time for the next frame to come out of the buffer is
		 * at least within the timer_interval of the next output frame. By
		 * doing this we are able to feed off the timing of the input frames
		 * and only rely on our jitterbuffer timer when frames are dropped.
		 * During testing, this hybrid form of timing gave more reliable results. */
		if (now < next) {
			long int diff = next - now;
			if (!putframe) {
				return frame;
			} else if (diff >= framedata->timer_interval) {
				return frame;
			}
		}

		ast_frfree(frame);
		frame = &ast_null_frame;
		res = framedata->jb_impl->get(framedata->jb_obj, &frame, now, framedata->timer_interval);
		switch (res) {
		case AST_JB_IMPL_OK:
			/* got it, and pass it through */
			break;
		case AST_JB_IMPL_DROP:
			ast_frfree(frame);
			frame = &ast_null_frame;
			break;
		case AST_JB_IMPL_INTERP:
			if (framedata->last_format) {
				struct ast_frame tmp = { 0, };

				tmp.frametype = AST_FRAME_VOICE;
				tmp.subclass.format = framedata->last_format;
				/* example: 8000hz / (1000 / 20ms) = 160 samples */
				tmp.samples = ast_format_get_sample_rate(framedata->last_format) / (1000 / framedata->timer_interval);
				tmp.delivery = ast_tvadd(framedata->start_tv, ast_samp2tv(next, 1000));
				tmp.offset = AST_FRIENDLY_OFFSET;
				tmp.src  = "func_jitterbuffer interpolation";
				ast_frfree(frame);
				frame = ast_frdup(&tmp);
				break;
			}
			/* else fall through */
		case AST_JB_IMPL_NOFRAME:
			ast_frfree(frame);
			frame = &ast_null_frame;
			break;
		}
	}

	if (frame->frametype == AST_FRAME_CONTROL) {
		switch (frame->subclass.integer) {
		case AST_CONTROL_HOLD:
		case AST_CONTROL_UNHOLD:
		case AST_CONTROL_T38_PARAMETERS:
		case AST_CONTROL_SRCUPDATE:
		case AST_CONTROL_SRCCHANGE:
			framedata->jb_impl->force_resync(framedata->jb_obj);
			break;
		default:
			break;
		}
	}

	return frame;
}
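
Finally, hook_event_cb is a framehook event callback and only takes effect once attached to a channel. A minimal sketch, assuming the standard ast_framehook_interface/ast_framehook_attach pattern (the destroy callback and framedata construction are elided):

struct ast_framehook_interface interface = {
	.version = AST_FRAMEHOOK_INTERFACE_VERSION,
	.event_cb = hook_event_cb,
	.data = framedata,
};

ast_channel_lock(chan);
if (ast_framehook_attach(chan, &interface) < 0) {
	ast_log(LOG_WARNING, "Failed to attach jitterbuffer framehook\n");
}
ast_channel_unlock(chan);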