Example #1
0
// Resolve a registered config name to its CAvConfigBase handler; returns NULL if not registered.
CAvConfigBase* CAvConfigManager::NameToConfClass(const char *name)
{
	std::map<std::string, CAvConfigBase*>::iterator it = m_config_name_confclass.find(name);
	if (it != m_config_name_confclass.end()) {
		return it->second;
	} else {
		av_error("NameToConfClass failed name[%s].\n", name);
	}
	return NULL;
}
Example #2
0
// Resolve a config id to its registered name; returns NULL if the id is unknown.
const char* CAvConfigManager::IdToName(int id)
{
	std::map<int, std::string>::iterator it = m_config_id_name.find(id);
	if (it != m_config_id_name.end()) {
		return it->second.c_str();
	} else {
		av_error("IdToName failed id = %d.\n", id);
		return NULL;
	}
}
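
For context, a minimal sketch of how these two lookups can be chained by a caller. The "Image"/CONF_IMAGE_FORMATS pair comes from the registration code in Example #5; the wrapper function itself is hypothetical, not part of the original class, and assumes both methods are publicly accessible:

// Hypothetical helper: resolve a numeric config id to its handler object.
CAvConfigBase* ResolveConfigById(CAvConfigManager &mgr, int id)
{
	const char *name = mgr.IdToName(id);      // e.g. CONF_IMAGE_FORMATS -> "Image"
	if (NULL == name)
		return NULL;                          // unknown id; failure already logged
	return mgr.NameToConfClass(name);         // "Image" -> registered CAvConfigBase*
}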
Example #3
0
// call->master_lock held in W
int media_player_play_blob(struct media_player *mp, const str *blob) {
#ifdef WITH_TRANSCODING
	const char *err;
	int av_ret = 0;

	if (media_player_play_init(mp))
		return -1;

	mp->blob = str_dup(blob);
	err = "out of memory";
	if (!mp->blob)
		goto err;
	mp->read_pos = *mp->blob;

	err = "could not allocate AVFormatContext";
	mp->fmtctx = avformat_alloc_context();
	if (!mp->fmtctx)
		goto err;

	void *avio_buf = av_malloc(DEFAULT_AVIO_BUFSIZE);
	err = "failed to allocate AVIO buffer";
	if (!avio_buf)
		goto err;

	mp->avioctx = avio_alloc_context(avio_buf, DEFAULT_AVIO_BUFSIZE, 0, mp, __mp_avio_read,
			NULL, __mp_avio_seek);
	err = "failed to allocate AVIOContext";
	if (!mp->avioctx) {
		av_freep(&avio_buf); // the buffer is only owned by the AVIOContext on success
		goto err;
	}

	mp->fmtctx->pb = mp->avioctx;

	// consumes allocated mp->fmtctx
	err = "failed to open AVFormatContext input";
	av_ret = avformat_open_input(&mp->fmtctx, "dummy", NULL, NULL);
	if (av_ret < 0)
		goto err;

	media_player_play_start(mp);

	return 0;

err:
	ilog(LOG_ERR, "Failed to start media playback from memory: %s", err);
	if (av_ret)
		ilog(LOG_ERR, "Error returned from libav: %s", av_error(av_ret));
#endif
	return -1;
}
Example #4
0
av_bool CAvConfigManager::LoadConfigFromFile(const char *path, std::string &conf)
{
	FILE *fp = fopen(path, "r");
	if (NULL == fp) {
		av_error("%s open fail\n", path);
		return av_false;
	}
	conf.clear();
	char *tmp = new char[12 * 1024 + 1];
	size_t len = 0;
	// Append with an explicit length so embedded NUL bytes are preserved
	// (a plain "conf += tmp" would stop at the first '\0').
	// Loop on the fread() return value rather than feof() so a read error
	// cannot turn into an endless loop.
	while ((len = fread(tmp, sizeof(char), 12 * 1024, fp)) > 0) {
		conf.append(tmp, len);
	}
	delete[] tmp;
	fclose(fp);
	return av_true;
}
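
A short usage sketch of this helper, mirroring the reader/parser call pattern used in Initialize (Example #5); treat it as illustrative only — only the path member and parser shown there are assumed:

	// Hypothetical call site inside CAvConfigManager:
	std::string data;
	AvConfigReader reader;
	if (av_true == LoadConfigFromFile(m_ConfigFullPatch.c_str(), data)) {
		if (false == reader.parse(data, m_total))
			av_error("config parse failed\n");
	}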
Example #5
0
void CAvConfigManager::Initialize()
{
	m_timer = new TimerConfManager();
	m_change = av_false;
	m_config_data.reserve(512 * 1024);
	AvConfigReader reader;
	bool load_flag = false;
	std::string ConfigPath;
	av_bool abRet = CAvDevice::GetEnv(std::string(EKey_ConfigsPath), ConfigPath);
	if (abRet == av_true) {
		SetAvConfigPath(ConfigPath);
	}

	// Try the primary config file first, then fall back to the backup copy.
	if (av_true == LoadConfigFromFile(m_ConfigFullPatch.c_str(), m_config_data)) {
		load_flag = reader.parse(m_config_data, m_total);
	}
	if (false == load_flag) {
		av_msg("Load primary config file failed, trying backup\n");
		if (av_true == LoadConfigFromFile(m_ConfigFullPatchBu.c_str(), m_config_data)) {
			load_flag = reader.parse(m_config_data, m_total);
		}
	}
	if (false == load_flag) {
		av_error("Load all config files failed\n");
	}


	// Register each config section under its name and index, then load it.
	m_name_configIndex.clear();

	m_name_configIndex.insert(ConfIndexValueType("DeviceUart", CONF_DEVICE_UART));
	LoadConfig("DeviceUart", m_config_device_uart);

	
	m_name_configIndex.insert(ConfIndexValueType("Capture", CONF_CAPTURE_FORMATS));
	LoadConfig("Capture", m_ConfigCapture);


	m_name_configIndex.insert(ConfIndexValueType("Image", CONF_IMAGE_FORMATS));
	LoadConfig("Image", m_ConfigImage);

	m_name_configIndex.insert(ConfIndexValueType("Enocde", CONF_ENCODE_FORMATS));
	LoadConfig("Enocde", m_ConfigEncode);

	m_name_configIndex.insert(ConfIndexValueType("Cover", CONF_ENCODE_COVER));
	LoadConfig("Cover", m_ConfigCover);

	m_name_configIndex.insert(ConfIndexValueType("WaterMarking", CONF_ENCODE_WATERMARKING));
	LoadConfig("WaterMarking", m_ConfigWaterMark);

	m_name_configIndex.insert(ConfIndexValueType("AlarmIo", CONF_ALARM_IO));
	LoadConfig("AlarmIo", m_ConfigAlarmIo);
	
	m_name_configIndex.insert(ConfIndexValueType("AlarmMd", CONF_ALARM_MD));
	LoadConfig("AlarmMd", m_ConfigAlarmMd);

	m_name_configIndex.insert(ConfIndexValueType("NetComm", CONF_NET_COMM));
	LoadConfig("NetComm", m_ConfigNetComm);

	m_name_configIndex.insert(ConfIndexValueType("NetProtocol", CONF_NETPROTOCOL));
	LoadConfig("NetProtocol", m_ConfigNetProtocol);

	m_name_configIndex.insert(ConfIndexValueType("Audio", CONF_AUDIO));
	LoadConfig("Audio", m_ConfigAudio);
	


	// User manager
	m_name_configIndex.insert(ConfIndexValueType("UserManager", CONF_USER_MANAGER));
	LoadConfig("UserManager", m_config_user_manager);

	// Net server: SMTP (email)
	m_name_configIndex.insert(ConfIndexValueType("NetServerSmtp", CONF_NET_SER_EMAIL));
	LoadConfig("NetServerSmtp", m_confignet_smtp);

	// Net server: FTP
	m_name_configIndex.insert(ConfIndexValueType("NetServerFtp", CONF_NET_SER_FTP));
	LoadConfig("NetServerFtp", m_confignet_ftp);

	// Net server: DDNS
	m_name_configIndex.insert(ConfIndexValueType("NetServerDdns", CONF_NET_SER_DDNS));
	LoadConfig("NetServerDdns", m_confignet_ddns);

	// Net server: UPnP
	m_name_configIndex.insert(ConfIndexValueType("NetServerUpnp", CONF_NET_SER_UPNP));
	LoadConfig("NetServerUpnp", m_confignet_upnp);

	// Net server: NTP
	m_name_configIndex.insert(ConfIndexValueType("NetServerNtp", CONF_NET_SER_NTP));
	LoadConfig("NetServerNtp", m_confignet_ntp);

	// If no config file could be loaded, save the current settings.
	if (false == load_flag) {
		Save();
	}
}
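
The register-then-load pairs above are highly repetitive; a small hypothetical helper (not present in the original class, and requiring a matching in-class declaration plus a suitable LoadConfig overload for each config type) could collapse each pair into one call:

// Hypothetical convenience wrapper built only from calls already used in Initialize().
template <typename TConf>
void CAvConfigManager::RegisterAndLoad(const char *name, int index, TConf &conf)
{
	m_name_configIndex.insert(ConfIndexValueType(name, index));
	LoadConfig(name, conf);
}

// Example usage inside Initialize():
//   RegisterAndLoad("DeviceUart", CONF_DEVICE_UART, m_config_device_uart);
//   RegisterAndLoad("Capture", CONF_CAPTURE_FORMATS, m_ConfigCapture);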
Example #6
0
static int __ensure_codec_handler(struct media_player *mp, AVStream *avs) {
	if (mp->handler)
		return 0;

	// synthesise rtp payload type
	struct rtp_payload_type src_pt = { .payload_type = -1 };
	// avs->codec is deprecated; use the CODECPAR accessor instead
	src_pt.codec_def = codec_find_by_av(avs->CODECPAR->codec_id);
	if (!src_pt.codec_def) {
		ilog(LOG_ERR, "Attempting to play media from an unsupported file format/codec");
		return -1;
	}
	src_pt.encoding = src_pt.codec_def->rtpname_str;
	src_pt.channels = avs->CODECPAR->channels;
	src_pt.clock_rate = avs->CODECPAR->sample_rate;
	codec_init_payload_type(&src_pt, mp->media);

	// find suitable output payload type
	struct rtp_payload_type *dst_pt;
	for (GList *l = mp->media->codecs_prefs_send.head; l; l = l->next) {
		dst_pt = l->data;
		if (dst_pt->codec_def && !dst_pt->codec_def->supplemental)
			goto found;
	}
	dst_pt = NULL;
found:
	if (!dst_pt) {
		ilog(LOG_ERR, "No supported output codec found in SDP");
		return -1;
	}
	ilog(LOG_DEBUG, "Output codec for media playback is " STR_FORMAT,
			STR_FMT(&dst_pt->encoding_with_params));

	// if we played anything before, scale our sync TS according to the time
	// that has passed
	if (mp->sync_ts_tv.tv_sec) {
		long long ts_diff_us = timeval_diff(&rtpe_now, &mp->sync_ts_tv);
		mp->sync_ts += ts_diff_us * dst_pt->clock_rate / 1000000 / dst_pt->codec_def->clockrate_mult;
	}

	mp->handler = codec_handler_make_playback(&src_pt, dst_pt, mp->sync_ts);
	if (!mp->handler)
		return -1;

	mp->duration = avs->duration * 1000 * avs->time_base.num / avs->time_base.den;

	return 0;
}


// appropriate lock must be held
static void media_player_read_packet(struct media_player *mp) {
	if (!mp->fmtctx)
		return;

	int ret = av_read_frame(mp->fmtctx, &mp->pkt);
	if (ret < 0) {
		if (ret == AVERROR_EOF) {
			ilog(LOG_DEBUG, "EOF reading from media stream");
			return;
		}
		ilog(LOG_ERR, "Error while reading from media stream");
		return;
	}

	if (!mp->fmtctx->streams) {
		ilog(LOG_ERR, "No AVStream present in format context");
		goto out;
	}

	AVStream *avs = mp->fmtctx->streams[0];
	if (!avs) {
		ilog(LOG_ERR, "No AVStream present in format context");
		goto out;
	}

	if (__ensure_codec_handler(mp, avs))
		goto out;

	// scale pts and duration according to sample rate

	long long duration_scaled = mp->pkt.duration * avs->CODECPAR->sample_rate
		* avs->time_base.num / avs->time_base.den;
	unsigned long long pts_scaled = mp->pkt.pts * avs->CODECPAR->sample_rate
		* avs->time_base.num / avs->time_base.den;

	long long us_dur = mp->pkt.duration * 1000000LL * avs->time_base.num / avs->time_base.den;
	ilog(LOG_DEBUG, "read media packet: pts %llu duration %lli (scaled %llu/%lli, %lli us), "
			"sample rate %i, time_base %i/%i",
			(unsigned long long) mp->pkt.pts,
			(long long) mp->pkt.duration,
			pts_scaled,
			duration_scaled,
			us_dur,
			avs->CODECPAR->sample_rate,
			avs->time_base.num, avs->time_base.den);

	// synthesise fake RTP header and media_packet context

	struct rtp_header rtp = {
		.timestamp = pts_scaled, // taken verbatim by handler_func_playback w/o byte swap
		.seq_num = htons(mp->seq),
	};
	struct media_packet packet = {
		.tv = rtpe_now,
		.call = mp->call,
		.media = mp->media,
		.rtp = &rtp,
		.ssrc_out = mp->ssrc_out,
	};
	str_init_len(&packet.raw, (char *) mp->pkt.data, mp->pkt.size);
	packet.payload = packet.raw;

	mp->handler->func(mp->handler, &packet);

	// as this is timing sensitive and we may have spent some time decoding,
	// update our global "now" timestamp
	gettimeofday(&rtpe_now, NULL);

	// keep track of RTP timestamps and real clock. look at the last packet we received
	// and update our sync TS.
	if (packet.packets_out.head) {
		struct codec_packet *p = packet.packets_out.head->data;
		if (p->rtp) {
			mp->sync_ts = ntohl(p->rtp->timestamp);
			mp->sync_ts_tv = p->to_send;
		}
	}

	media_packet_encrypt(mp->crypt_handler->out->rtp_crypt, mp->sink, &packet);

	mutex_lock(&mp->sink->out_lock);
	if (media_socket_dequeue(&packet, mp->sink))
		ilog(LOG_ERR, "Error sending playback media to RTP sink");
	mutex_unlock(&mp->sink->out_lock);

	timeval_add_usec(&mp->next_run, us_dur);
	timerthread_obj_schedule_abs(&mp->tt_obj, &mp->next_run);

out:
	av_packet_unref(&mp->pkt);
}


// call->master_lock held in W
static int media_player_play_init(struct media_player *mp) {
	media_player_shutdown(mp);

	// find call media suitable for playback
	struct call_media *media;
	for (GList *l = mp->ml->medias.head; l; l = l->next) {
		media = l->data;
		if (media->type_id != MT_AUDIO)
			continue;
		if (!MEDIA_ISSET(media, SEND))
			continue;
		if (media->streams.length == 0)
			continue;
		goto found;
	}
	media = NULL;
found:
	if (!media) {
		ilog(LOG_ERR, "No suitable SDP section for media playback");
		return -1;
	}
	mp->media = media;
	mp->sink = media->streams.head->data;
	mp->crypt_handler = determine_handler(&transport_protocols[PROTO_RTP_AVP], media, 1);

	return 0;
}


// call->master_lock held in W
static void media_player_play_start(struct media_player *mp) {
	// needed to have usable duration for some formats. ignore errors.
	avformat_find_stream_info(mp->fmtctx, NULL);

	mp->next_run = rtpe_now;
	// give ourselves a bit of a head start with decoding
	timeval_add_usec(&mp->next_run, -50000);
	media_player_read_packet(mp);
}
#endif


// call->master_lock held in W
int media_player_play_file(struct media_player *mp, const str *file) {
#ifdef WITH_TRANSCODING
	if (media_player_play_init(mp))
		return -1;

	char file_s[PATH_MAX];
	snprintf(file_s, sizeof(file_s), STR_FORMAT, STR_FMT(file));

	int ret = avformat_open_input(&mp->fmtctx, file_s, NULL, NULL);
	if (ret < 0) {
		ilog(LOG_ERR, "Failed to open media file for playback: %s", av_error(ret));
		return -1;
	}

	media_player_play_start(mp);

	return 0;
#else
	return -1;
#endif
}


#ifdef WITH_TRANSCODING
static int __mp_avio_read_wrap(void *opaque, uint8_t *buf, int buf_size) {
	struct media_player *mp = opaque;
	if (buf_size < 0)
		return AVERROR(EINVAL);
	if (buf_size == 0)
		return 0;
	if (!mp->read_pos.len)
		return AVERROR_EOF;

	int len = buf_size;
	if (len > mp->read_pos.len)
		len = mp->read_pos.len;
	memcpy(buf, mp->read_pos.s, len);
	str_shift(&mp->read_pos, len);
	return len;
}
static int __mp_avio_read(void *opaque, uint8_t *buf, int buf_size) {
	ilog(LOG_DEBUG, "__mp_avio_read(%i)", buf_size);
	int ret = __mp_avio_read_wrap(opaque, buf, buf_size);
	ilog(LOG_DEBUG, "__mp_avio_read(%i) = %i", buf_size, ret);
	return ret;
}
Example #7
0
AVFrame *resample_frame(resample_t *resample, AVFrame *frame, const format_t *to_format) {
	const char *err;
	int errcode = 0;
	AVFrame *swr_frame = NULL;

	uint64_t to_channel_layout = av_get_default_channel_layout(to_format->channels);
	fix_frame_channel_layout(frame);

	if (frame->format != to_format->format)
		goto resample;
	if (frame->sample_rate != to_format->clockrate)
		goto resample;
	if (frame->channel_layout != to_channel_layout)
		goto resample;

	return av_frame_clone(frame);

resample:

	if (G_UNLIKELY(!resample->swresample)) {
		resample->swresample = swr_alloc_set_opts(NULL,
				to_channel_layout,
				to_format->format,
				to_format->clockrate,
				frame->channel_layout,
				frame->format,
				frame->sample_rate,
				0, NULL);
		err = "failed to alloc resample context";
		if (!resample->swresample)
			goto err;

		err = "failed to init resample context";
		if ((errcode = swr_init(resample->swresample)) < 0)
			goto err;
	}

	// get a large enough buffer for resampled audio - this should be enough so we don't
	// have to loop
	int dst_samples = av_rescale_rnd(swr_get_delay(resample->swresample, to_format->clockrate)
			+ frame->nb_samples,
				to_format->clockrate, frame->sample_rate, AV_ROUND_UP);

	swr_frame = av_frame_alloc();

	err = "failed to alloc resampling frame";
	if (!swr_frame)
		goto err;
	av_frame_copy_props(swr_frame, frame);
	swr_frame->format = to_format->format;
	swr_frame->channel_layout = to_channel_layout;
	swr_frame->nb_samples = dst_samples;
	swr_frame->sample_rate = to_format->clockrate;
	err = "failed to get resample buffers";
	if ((errcode = av_frame_get_buffer(swr_frame, 0)) < 0)
		goto err;

	int ret_samples = swr_convert(resample->swresample, swr_frame->extended_data,
				dst_samples,
				(const uint8_t **) frame->extended_data,
				frame->nb_samples);
	err = "failed to resample audio";
	if ((errcode = ret_samples) < 0)
		goto err;

	swr_frame->nb_samples = ret_samples;
	swr_frame->pts = av_rescale(frame->pts, to_format->clockrate, frame->sample_rate);
	return swr_frame;

err:
	if (errcode)
		ilog(LOG_ERR, "Error resampling: %s (%s)", err, av_error(errcode));
	else
		ilog(LOG_ERR, "Error resampling: %s", err);
	av_frame_free(&swr_frame); // free the half-built output frame on failure (no-op if NULL)
	resample_shutdown(resample);
	return NULL;
}