Example #1
void RP_ConfirmChannelConnect(RTPStream *ch, GF_Err e)
{
	GF_NetworkCommand com;

	/*in case the channel has been disconnected while the SETUP was issued and processed. We could also
	clean up the command stack here*/
	if (!ch->channel) return;

	gf_term_on_connect(ch->owner->service, ch->channel, e);
	if (e != GF_OK || !ch->rtp_ch) return;

	/*success, overwrite SL config*/
	memset(&com, 0, sizeof(GF_NetworkCommand));
	com.command_type = GF_NET_CHAN_RECONFIG;
	com.base.on_channel = ch->channel;

	gf_rtp_depacketizer_get_slconfig(ch->depacketizer, &com.cfg.sl_config);
	/*reconfig*/
	gf_term_on_command(ch->owner->service, &com, GF_OK);

	/*ISMACryp config*/
	if (ch->depacketizer->flags & GF_RTP_HAS_ISMACRYP) {
		memset(&com, 0, sizeof(GF_NetworkCommand));
		com.base.on_channel = ch->channel;
		com.command_type = GF_NET_CHAN_DRM_CFG;
		com.drm_cfg.scheme_type = ch->depacketizer->isma_scheme;
		com.drm_cfg.scheme_version = 1;
		/*not transported in SDP!!!*/
		com.drm_cfg.scheme_uri = NULL;
		com.drm_cfg.kms_uri = ch->depacketizer->key;
		gf_term_on_command(ch->owner->service, &com, GF_OK);
	}
}
Example #2
u32 RP_Thread(void *param)
{
	u32 i;
	GF_NetworkCommand com;
	RTSPSession *sess;
	RTPStream *ch;
	RTPClient *rtp = (RTPClient *)param;

	rtp->th_state = 1;
	com.command_type = GF_NET_CHAN_BUFFER_QUERY;
	while (rtp->th_state) {
		gf_mx_p(rtp->mx);

		/*fetch data on UDP*/
		i=0;
		while ((ch = (RTPStream *)gf_list_enum(rtp->channels, &i))) {
			if ((ch->flags & RTP_EOS) || (ch->status!=RTP_Running) ) continue;
			/*for interleaved channels don't read too fast, query the buffer occupancy*/
			if (ch->flags & RTP_INTERLEAVED) {
				com.base.on_channel = ch->channel;
				gf_term_on_command(rtp->service, &com, GF_OK);
				/*if no buffering, use a default value (3 sec of data should do it)*/
				if (!com.buffer.max) com.buffer.max = 3000;
				if (com.buffer.occupancy <= com.buffer.max) ch->rtsp->flags |= RTSP_TCP_FLUSH;
			} else {
				RP_ReadStream(ch);
			}
		}

		/*and process commands / flush TCP*/
		i=0;
		while ((sess = (RTSPSession *)gf_list_enum(rtp->sessions, &i))) {
			RP_ProcessCommands(sess);

			if (sess->connect_error) {
				gf_term_on_connect(sess->owner->service, NULL, sess->connect_error);
				sess->connect_error = 0;
			}

		}

		gf_mx_v(rtp->mx);

		gf_sleep(1);
	}

	if (rtp->dnload) gf_term_download_del(rtp->dnload);
	rtp->dnload = NULL;

	rtp->th_state = 2;
	return 0;
}
Example #3
static void AC3_RegulateDataRate(AC3Reader *read)
{
	GF_NetworkCommand com;

	memset(&com, 0, sizeof(GF_NetworkCommand));
	com.command_type = GF_NET_CHAN_BUFFER_QUERY;
	com.base.on_channel = read->ch;
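	/*query the channel buffer occupancy and sleep while it is still full, so we do not push data faster than it is consumed*/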
	while (read->ch) {
		gf_term_on_command(read->service, &com, GF_OK);
		if (com.buffer.occupancy < com.buffer.max) break;
		gf_sleep(2);
	}
}
Example #4
static void SAF_Regulate(SAFIn *read)
{
	GF_NetworkCommand com;
	SAFChannel *ch;

	com.command_type = GF_NET_CHAN_BUFFER_QUERY;
	/*sleep until the buffer occupancy drops low enough - note that this works because all streams in this
	demuxer are synchronized*/
	while (read->run_state) {
		u32 min_occ = (u32) -1;
		u32 i=0;
		while ( (ch = (SAFChannel *)gf_list_enum(read->channels, &i))) {
			com.base.on_channel = ch->ch;
			gf_term_on_command(read->service, &com, GF_OK);
			if (com.buffer.occupancy < ch->buffer_min) return;
			if (com.buffer.occupancy) min_occ = MIN(min_occ, com.buffer.occupancy - ch->buffer_min);
		}
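		/*sleep for the smallest margin above the per-channel minimum; if no channel reported any buffered data, stop regulating*/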
		if (min_occ == (u32) -1) break;
		//fprintf(stdout, "Regulating SAF demux - sleeping for %d ms\n", min_occ);
		gf_sleep(min_occ);
	}
}
Example #5
static void gf_m2ts_switch_quality(GF_M2TS_Program *prog, GF_M2TS_Demuxer *ts, Bool switch_up)
{
	GF_M2TS_ES *es;
	u32 i, count;
	GF_NetworkCommand com;

	if (!prog->is_scalable)
		return;

	if (switch_up) {
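		/*look for an enhancement-layer PES that depends on the PID currently being played, and start delivering it*/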
		for (i = 0; i < GF_M2TS_MAX_STREAMS; i++) {
			es = ts->ess[i];
			if (es && (es->flags & GF_M2TS_ES_IS_PES) && (((GF_M2TS_PES *)es)->depends_on_pid == prog->pid_playing)) {
				GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("Turn on ES%d\n", es->pid));
				gf_m2ts_set_pes_framing((GF_M2TS_PES *)ts->ess[es->pid], GF_M2TS_PES_FRAMING_DEFAULT);
				prog->pid_playing = es->pid;
				return;
			}
		}
	}
	else {
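		/*find the ES currently being played; if it depends on a lower layer, stop it and fall back to that base PID*/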
		count = gf_list_count(prog->streams);
		for (i = 0; i < count; i++) {
			es = (GF_M2TS_ES *)gf_list_get(prog->streams, i);
			if (es && (es->pid == prog->pid_playing) && ((GF_M2TS_PES *)es)->depends_on_pid) {
				GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("Turn off ES%d - playing ES%d\n", es->pid, ((GF_M2TS_PES *)es)->depends_on_pid));
				gf_m2ts_set_pes_framing((GF_M2TS_PES *)ts->ess[es->pid], GF_M2TS_PES_FRAMING_SKIP);
				memset(&com, 0, sizeof(com));
				com.command_type = GF_NET_CHAN_RESET;
				com.base.on_channel = ((GF_M2TS_PES *)es)->user;
				gf_term_on_command(((M2TSIn *)ts->user)->service, &com, GF_OK);
				prog->pid_playing = ((GF_M2TS_PES *)es)->depends_on_pid;
				return;
			}
		}
	}
}
Example #6
static GF_Err AC3_ServiceCommand(GF_InputService *plug, GF_NetworkCommand *com)
{
	AC3Reader *read = plug->priv;


	if (com->base.command_type==GF_NET_SERVICE_INFO) {
		com->info.name = read->icy_track_name ? read->icy_track_name : read->icy_name;
		com->info.comment = read->icy_genre;
		return GF_OK;
	}

	if (!com->base.on_channel) {
		/*if live session we may cache*/
		if (read->is_live && (com->command_type==GF_NET_IS_CACHABLE)) return GF_OK;
		return GF_NOT_SUPPORTED;
	}
	switch (com->command_type) {
	case GF_NET_CHAN_SET_PULL:
		if ((read->ch == com->base.on_channel) && read->is_live) return GF_NOT_SUPPORTED;
		return GF_OK;
	case GF_NET_CHAN_INTERACTIVE:
		if ((read->ch == com->base.on_channel) && read->is_live) return GF_NOT_SUPPORTED;
		return GF_OK;
	case GF_NET_CHAN_BUFFER:
		if ((read->ch == com->base.on_channel) && read->is_live) {
			if (com->buffer.max<1000) com->buffer.max = 1000;
			com->buffer.min = com->buffer.max/2;
		}
		return GF_OK;
	case GF_NET_CHAN_SET_PADDING:
		read->pad_bytes = com->pad.padding_bytes;
		return GF_OK;
	case GF_NET_CHAN_DURATION:
		com->duration.duration = read->duration;
		com->duration.duration /= read->sample_rate;
		return GF_OK;
	case GF_NET_CHAN_PLAY:
		read->start_range = com->play.start_range;
		read->end_range = com->play.end_range;
		read->current_time = 0;
		if (read->stream) gf_f64_seek(read->stream, 0, SEEK_SET);

		if (read->ch == com->base.on_channel) { 
			read->done = 0; 
			/*PLAY after complete download, estimate duration*/
			if (!read->is_remote && !read->duration) {
				AC3_ConfigureFromFile(read);
				if (read->duration) {
					GF_NetworkCommand rcfg;
					rcfg.base.on_channel = read->ch;
					rcfg.base.command_type = GF_NET_CHAN_DURATION;
					rcfg.duration.duration = read->duration;
					rcfg.duration.duration /= read->sample_rate;
					gf_term_on_command(read->service, &rcfg, GF_OK);
				}
			}
		}
		return GF_OK;
	case GF_NET_CHAN_STOP:
		return GF_OK;
	default:
		return GF_OK;
	}
}
Example #7
void AC3_NetIO(void *cbk, GF_NETIO_Parameter *param)
{
	GF_Err e;
	const char *szCache;
	u32 total_size, bytes_done;
	AC3Reader *read = (AC3Reader *) cbk;

	e = param->error;
	/*done*/
	if (param->msg_type==GF_NETIO_DATA_TRANSFERED) {
		if (read->stream) {
			read->is_remote = 0;
			e = GF_EOS;
		} else if (!read->needs_connection) {
			return;
		}
	} else if (param->msg_type==GF_NETIO_PARSE_HEADER) {
		if (!strcmp(param->name, "icy-name")) {
			if (read->icy_name) gf_free(read->icy_name);
			read->icy_name = gf_strdup(param->value);
		}
		if (!strcmp(param->name, "icy-genre")) {
			if (read->icy_genre) gf_free(read->icy_genre);
			read->icy_genre = gf_strdup(param->value);
		}
		if (!strcmp(param->name, "icy-meta")) {
			GF_NetworkCommand com;
			char *meta;
			if (read->icy_track_name) gf_free(read->icy_track_name);
			read->icy_track_name = NULL;
			meta = param->value;
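			/*icy-meta is a ';'-separated list of key=value pairs - extract the StreamTitle entry*/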
			while (meta && meta[0]) {
				char *sep = strchr(meta, ';');
				if (sep) sep[0] = 0;
	
				if (!strnicmp(meta, "StreamTitle=", 12)) {
					read->icy_track_name = gf_strdup(meta+12);
				}
				if (!sep) break;
				sep[0] = ';';
				meta = sep+1;
			}

			com.base.command_type = GF_NET_SERVICE_INFO;
			gf_term_on_command(read->service, &com, GF_OK);
		}
		return;
	} else {
		/*handle service message*/
		gf_term_download_update_stats(read->dnload);
		if (param->msg_type!=GF_NETIO_DATA_EXCHANGE) return;
	}

	/*data fetching or EOS*/
	if (e >= GF_OK) {
		if (read->needs_connection) {
			gf_dm_sess_get_stats(read->dnload, NULL, NULL, &total_size, NULL, NULL, NULL);
			if (!total_size) read->is_live = 1;
		}
		if (read->is_live) {
			if (!e) AC3_OnLiveData(read, param->data, param->size);
			return;
		}
		if (read->stream) return;

		/*open service*/
		szCache = gf_dm_sess_get_cache_name(read->dnload);
		if (!szCache) e = GF_IO_ERR;
		else {
			read->stream = gf_f64_open((char *) szCache, "rb");
			if (!read->stream) e = GF_SERVICE_ERROR;
			else {
				/*if the whole file is already in the cache, parse the duration*/
				if (e==GF_EOS) read->is_remote = 0;
				e = GF_OK;
				/*not enough data*/
				if (!AC3_ConfigureFromFile(read)) {
					/*get amount downloaded and check*/
					gf_dm_sess_get_stats(read->dnload, NULL, NULL, NULL, &bytes_done, NULL, NULL);
					if (bytes_done>10*1024) {
						e = GF_CORRUPTED_DATA;
					} else {
						fclose(read->stream);
						read->stream = NULL;
						return;
					}
				}
			}
		}
	}
	/*OK confirm*/
	if (read->needs_connection) {
		read->needs_connection = 0;
		gf_term_on_connect(read->service, NULL, e);
		if (!e) AC3_SetupObject(read);
	}
}
Example #8
static u32 FFDemux_Run(void *par)
{
	AVPacket pkt;
	s64 seek_to;
	GF_NetworkCommand com;
	GF_NetworkCommand map;
	GF_SLHeader slh;
	FFDemux *ffd = (FFDemux *) par;

	memset(&map, 0, sizeof(GF_NetworkCommand));
	map.command_type = GF_NET_CHAN_MAP_TIME;

	memset(&com, 0, sizeof(GF_NetworkCommand));
	com.command_type = GF_NET_BUFFER_QUERY;

	memset(&slh, 0, sizeof(GF_SLHeader));

	slh.compositionTimeStampFlag = slh.decodingTimeStampFlag = 1;

	while (ffd->is_running) {
		if ((!ffd->video_ch && (ffd->video_st>=0)) || (!ffd->audio_ch && (ffd->audio_st>=0))) {
			gf_sleep(100);
			continue;
		}

		if ((ffd->seek_time>=0) && ffd->seekable) {
			seek_to = (s64) (AV_TIME_BASE*ffd->seek_time);
			av_seek_frame(ffd->ctx, -1, seek_to, AVSEEK_FLAG_BACKWARD);
			ffd->seek_time = -1;
		}
		pkt.stream_index = -1;
		/*EOF*/
		if (av_read_frame(ffd->ctx, &pkt) <0) break;
		if (pkt.pts == AV_NOPTS_VALUE) pkt.pts = pkt.dts;
		if (!pkt.dts) pkt.dts = pkt.pts;

		slh.compositionTimeStamp = pkt.pts;
		slh.decodingTimeStamp = pkt.dts;

		gf_mx_p(ffd->mx);
		/*blindly send audio as soon as video is init*/
		if (ffd->audio_ch && (pkt.stream_index == ffd->audio_st) ) {
//			u64 seek_audio = ffd->seek_time ? (u64) (s64) (ffd->seek_time*ffd->audio_tscale.den) : 0;
			slh.compositionTimeStamp *= ffd->audio_tscale.num;
			slh.decodingTimeStamp *= ffd->audio_tscale.num;

#if 0
			if (slh.compositionTimeStamp < seek_audio) {
				slh.decodingTimeStamp = slh.compositionTimeStamp = seek_audio;
			}
#endif
			gf_term_on_sl_packet(ffd->service, ffd->audio_ch, (char *) pkt.data, pkt.size, &slh, GF_OK);
		}
		else if (ffd->video_ch && (pkt.stream_index == ffd->video_st)) {
//			u64 seek_video = ffd->seek_time ? (u64) (s64) (ffd->seek_time*ffd->video_tscale.den) : 0;
			slh.compositionTimeStamp *= ffd->video_tscale.num;
			slh.decodingTimeStamp *= ffd->video_tscale.num;

#if 0
			if (slh.compositionTimeStamp < seek_video) {
				slh.decodingTimeStamp = slh.compositionTimeStamp = seek_video;
			}
#endif
			gf_term_on_sl_packet(ffd->service, ffd->video_ch, (char *) pkt.data, pkt.size, &slh, GF_OK);
		}
		gf_mx_v(ffd->mx);
		av_free_packet(&pkt);

		/*sleep until the buffer occupancy drops low enough - note that this works because all streams in this
		demuxer are synchronized*/
		while (ffd->audio_run || ffd->video_run) {
			gf_term_on_command(ffd->service, &com, GF_OK);
			if (com.buffer.occupancy < com.buffer.max)
				break;
			gf_sleep(10);

		}
		if (!ffd->audio_run && !ffd->video_run) break;
	}
	/*signal EOS*/
	if (ffd->audio_ch) gf_term_on_sl_packet(ffd->service, ffd->audio_ch, NULL, 0, NULL, GF_EOS);
	if (ffd->video_ch) gf_term_on_sl_packet(ffd->service, ffd->video_ch, NULL, 0, NULL, GF_EOS);
	ffd->is_running = 2;

	return 0;
}
Example #9
static u32 FFDemux_Run(void *par)
{
	AVPacket pkt;
	s64 seek_to;
	u64 seek_audio, seek_video;
	Bool video_init, do_seek, map_audio_time, map_video_time;
	GF_NetworkCommand com;
	GF_NetworkCommand map;
	GF_SLHeader slh;
	FFDemux *ffd = (FFDemux *) par;

	memset(&map, 0, sizeof(GF_NetworkCommand));
	map.command_type = GF_NET_CHAN_MAP_TIME;

	memset(&com, 0, sizeof(GF_NetworkCommand));
	com.command_type = GF_NET_CHAN_BUFFER_QUERY;

	memset(&slh, 0, sizeof(GF_SLHeader));

	slh.compositionTimeStampFlag = slh.decodingTimeStampFlag = 1;
	seek_to = (s64) (AV_TIME_BASE*ffd->seek_time);
	map_video_time = !ffd->seekable;

	video_init = (seek_to && ffd->video_ch) ? 0 : 1;
	seek_audio = seek_video = 0;
	if (ffd->seekable && (ffd->audio_st>=0)) seek_audio = (u64) (s64) (ffd->seek_time*ffd->audio_tscale.den);
	if (ffd->seekable && (ffd->video_st>=0)) seek_video = (u64) (s64) (ffd->seek_time*ffd->video_tscale.den);

	/*it appears that ffmpeg has trouble resyncing on some MPEG files - we work around this by restarting at 0 to get the
	first video frame, and only then seeking*/
	if (ffd->seekable) av_seek_frame(ffd->ctx, -1, video_init ? seek_to : 0, AVSEEK_FLAG_BACKWARD);
	do_seek = !video_init;
	map_audio_time = video_init ? ffd->unreliable_audio_timing : 0;

	while (ffd->is_running) {

		pkt.stream_index = -1;
		/*EOF*/
		if (av_read_frame(ffd->ctx, &pkt) <0) break;
		if (pkt.pts == AV_NOPTS_VALUE) pkt.pts = pkt.dts;
		if (!pkt.dts) pkt.dts = pkt.pts;

		slh.compositionTimeStamp = pkt.pts;
		slh.decodingTimeStamp = pkt.dts;

		gf_mx_p(ffd->mx);
		/*blindly send audio as soon as video is init*/
		if (ffd->audio_ch && (pkt.stream_index == ffd->audio_st) && !do_seek) {
			slh.compositionTimeStamp *= ffd->audio_tscale.num;
			slh.decodingTimeStamp *= ffd->audio_tscale.num;

			if (map_audio_time) {
				map.base.on_channel = ffd->audio_ch;
				map.map_time.media_time = ffd->seek_time;
				/*map with TS=0 since we don't use SL*/
				map.map_time.timestamp = 0;
				map.map_time.reset_buffers = 1;
				map_audio_time = 0;
				gf_term_on_command(ffd->service, &map, GF_OK);
			}
			else if (slh.compositionTimeStamp < seek_audio) {
				slh.decodingTimeStamp = slh.compositionTimeStamp = seek_audio;
			}
			gf_term_on_sl_packet(ffd->service, ffd->audio_ch, pkt.data, pkt.size, &slh, GF_OK);
		}
		else if (ffd->video_ch && (pkt.stream_index == ffd->video_st)) {
			slh.compositionTimeStamp *= ffd->video_tscale.num;
			slh.decodingTimeStamp *= ffd->video_tscale.num;

			/*if we get pts = 0 after a seek, the demuxer is resetting PTSs, so force a time map*/
			if ((!do_seek && seek_to && !slh.compositionTimeStamp) || (map_video_time) ) {
				seek_to = 0;
				map_video_time = 0;

				map.base.on_channel = ffd->video_ch;
				map.map_time.timestamp = (u64) pkt.pts;
//				map.map_time.media_time = ffd->seek_time;
				map.map_time.media_time = 0;
				map.map_time.reset_buffers = 0;
				gf_term_on_command(ffd->service, &map, GF_OK);
			}
			else if (slh.compositionTimeStamp < seek_video) {
				slh.decodingTimeStamp = slh.compositionTimeStamp = seek_video;
			}
			gf_term_on_sl_packet(ffd->service, ffd->video_ch, pkt.data, pkt.size, &slh, GF_OK);
			video_init = 1;
		}
		gf_mx_v(ffd->mx);
		av_free_packet(&pkt);

		/*here's the trick - only seek after sending the first packets of each stream - this allows ffmpeg video decoders
		to resync properly*/
		if (do_seek && video_init && ffd->seekable) {
			av_seek_frame(ffd->ctx, -1, seek_to, AVSEEK_FLAG_BACKWARD);
			do_seek = 0;
			map_audio_time = ffd->unreliable_audio_timing;
		}
		/*sleep until the buffer occupancy drops low enough - note that this works because all streams in this
		demuxer are synchronized*/
		while (1) {
			if (ffd->audio_ch) {
				com.base.on_channel = ffd->audio_ch;
				gf_term_on_command(ffd->service, &com, GF_OK);
				if (com.buffer.occupancy < ffd->data_buffer_ms) break;
			}
			if (ffd->video_ch) {
				com.base.on_channel = ffd->video_ch;
				gf_term_on_command(ffd->service, &com, GF_OK);
				if (com.buffer.occupancy < ffd->data_buffer_ms) break;
			}
			gf_sleep(10);

			/*escape if disconnect*/
			if (!ffd->audio_run && !ffd->video_run) break;
		}
		if (!ffd->audio_run && !ffd->video_run) break;
	}
	/*signal EOS*/
	if (ffd->audio_ch) gf_term_on_sl_packet(ffd->service, ffd->audio_ch, NULL, 0, NULL, GF_EOS);
	if (ffd->video_ch) gf_term_on_sl_packet(ffd->service, ffd->video_ch, NULL, 0, NULL, GF_EOS);
	ffd->is_running = 2;

	return 0;
}
Example #10
GF_Err ISOR_ConnectChannel(GF_InputService *plug, LPNETCHANNEL channel, const char *url, Bool upstream)
{
    u32 ESID;
    ISOMChannel *ch;
    GF_NetworkCommand com;
    u32 track;
    Bool is_esd_url;
    GF_Err e;
    ISOMReader *read;
    if (!plug || !plug->priv) return GF_SERVICE_ERROR;
    read = (ISOMReader *) plug->priv;

    track = 0;
    ch = NULL;
    is_esd_url = 0;
    e = GF_OK;
    if (upstream) {
        e = GF_ISOM_INVALID_FILE;
        goto exit;
    }
    if (!read->mov) return GF_SERVICE_ERROR;

    if (strstr(url, "ES_ID")) {
        sscanf(url, "ES_ID=%u", &ESID);
    } else {
        /*handle url like mypath/myfile.mp4#trackID*/
        char *track_id = strrchr(url, '.');
        if (track_id) {
            track_id = strchr(url, '#');
            if (track_id) track_id ++;
        }
        is_esd_url = 1;

        ESID = 0;
        /*if there is only one track, use it*/
        if (gf_isom_get_track_count(read->mov)==1) ESID = gf_isom_get_track_id(read->mov, 1);
        else if (track_id) {
            ESID = atoi(track_id);
            track = gf_isom_get_track_by_id(read->mov, (u32) ESID);
            if (!track) ESID = 0;
        }

    }
    if (!ESID) {
        e = GF_NOT_SUPPORTED;
        goto exit;
    }

    /*a channel cannot be opened twice, it has to be closed first - NOTE a track is NOT a channel and the user can open
    the same track several times as long as a dedicated channel is used each time*/
    ch = isor_get_channel(read, channel);
    if (ch) {
        e = GF_SERVICE_ERROR;
        goto exit;
    }
    track = gf_isom_get_track_by_id(read->mov, (u32) ESID);
    if (!track) {
        e = GF_STREAM_NOT_FOUND;
        goto exit;
    }

    GF_SAFEALLOC(ch, ISOMChannel);
    ch->owner = read;
    ch->channel = channel;
    gf_list_add(read->channels, ch);
    ch->track = track;
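    /*map the ISO media handler type to the corresponding GPAC stream type*/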
    switch (gf_isom_get_media_type(ch->owner->mov, ch->track)) {
    case GF_ISOM_MEDIA_OCR:
        ch->streamType = GF_STREAM_OCR;
        break;
    case GF_ISOM_MEDIA_SCENE:
        ch->streamType = GF_STREAM_SCENE;
        break;
    }

    ch->has_edit_list = gf_isom_get_edit_segment_count(ch->owner->mov, ch->track) ? 1 : 0;
    ch->has_rap = (gf_isom_has_sync_points(ch->owner->mov, ch->track)==1) ? 1 : 0;
    ch->time_scale = gf_isom_get_media_timescale(ch->owner->mov, ch->track);

exit:
    gf_term_on_connect(read->service, channel, e);
    /*if esd url reconfig SL layer*/
    if (!e && is_esd_url) {
        GF_ESD *esd;
        memset(&com, 0, sizeof(GF_NetworkCommand));
        com.base.on_channel = channel;
        com.command_type = GF_NET_CHAN_RECONFIG;
        esd = gf_isom_get_esd(read->mov, ch->track, 1);
        if (esd) {
            memcpy(&com.cfg.sl_config, esd->slConfig, sizeof(GF_SLConfig));
            gf_odf_desc_del((GF_Descriptor *)esd);
        } else {
            com.cfg.sl_config.tag = GF_ODF_SLC_TAG;
            com.cfg.sl_config.timestampLength = 32;
            com.cfg.sl_config.timestampResolution = ch->time_scale;
            com.cfg.sl_config.useRandomAccessPointFlag = 1;
        }
        gf_term_on_command(read->service, &com, GF_OK);
    }
    if (!e && track && gf_isom_is_track_encrypted(read->mov, track)) {
        memset(&com, 0, sizeof(GF_NetworkCommand));
        com.base.on_channel = channel;
        com.command_type = GF_NET_CHAN_DRM_CFG;
        ch->is_encrypted = 1;
        if (gf_isom_is_ismacryp_media(read->mov, track, 1)) {
            gf_isom_get_ismacryp_info(read->mov, track, 1, NULL, &com.drm_cfg.scheme_type, &com.drm_cfg.scheme_version, &com.drm_cfg.scheme_uri, &com.drm_cfg.kms_uri, NULL, NULL, NULL);
            gf_term_on_command(read->service, &com, GF_OK);
        } else if (gf_isom_is_omadrm_media(read->mov, track, 1)) {
            gf_isom_get_omadrm_info(read->mov, track, 1, NULL, &com.drm_cfg.scheme_type, &com.drm_cfg.scheme_version, &com.drm_cfg.contentID, &com.drm_cfg.kms_uri, &com.drm_cfg.oma_drm_textual_headers, &com.drm_cfg.oma_drm_textual_headers_len, NULL, &com.drm_cfg.oma_drm_crypt_type, NULL, NULL, NULL);

            gf_media_get_file_hash(gf_isom_get_filename(read->mov), com.drm_cfg.hash);
            gf_term_on_command(read->service, &com, GF_OK);

        }
    }
    return e;
}
Example #11
GF_Err MPD_ConnectService(GF_InputService *plug, GF_ClientService *serv, const char *url)
{
	GF_MPD_In *mpdin = (GF_MPD_In*) plug->priv;
	const char *opt;
	GF_Err e;
	s32 shift_utc_ms, debug_adaptation_set;
	u32 max_cache_duration, auto_switch_count, init_timeshift;
	Bool use_server_utc;
	GF_DASHInitialSelectionMode first_select_mode;
	Bool keep_files, disable_switching;

	GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[MPD_IN] Received Service Connection request (%p) from terminal for %s\n", serv, url));

	if (!mpdin || !serv || !url)
		return GF_BAD_PARAM;

	mpdin->service = serv;

	mpdin->dash_io.udta = mpdin;
	mpdin->dash_io.delete_cache_file = mpdin_dash_io_delete_cache_file;
	mpdin->dash_io.create = mpdin_dash_io_create;
	mpdin->dash_io.del = mpdin_dash_io_del;
	mpdin->dash_io.abort = mpdin_dash_io_abort;
	mpdin->dash_io.setup_from_url = mpdin_dash_io_setup_from_url;
	mpdin->dash_io.set_range = mpdin_dash_io_set_range;
	mpdin->dash_io.init = mpdin_dash_io_init;
	mpdin->dash_io.run = mpdin_dash_io_run;
	mpdin->dash_io.get_url = mpdin_dash_io_get_url;
	mpdin->dash_io.get_cache_name = mpdin_dash_io_get_cache_name;
	mpdin->dash_io.get_mime = mpdin_dash_io_get_mime;
	mpdin->dash_io.get_header_value = mpdin_dash_io_get_header_value;
	mpdin->dash_io.get_utc_start_time = mpdin_dash_io_get_utc_start_time;
	mpdin->dash_io.get_bytes_per_sec = mpdin_dash_io_get_bytes_per_sec;
	mpdin->dash_io.get_total_size = mpdin_dash_io_get_total_size;
	mpdin->dash_io.get_bytes_done = mpdin_dash_io_get_bytes_done;
	mpdin->dash_io.on_dash_event = mpdin_dash_io_on_dash_event;

	max_cache_duration = 0;
	opt = gf_modules_get_option((GF_BaseInterface *)plug, "Network", "BufferLength");
	if (opt) max_cache_duration = atoi(opt);

	auto_switch_count = 0;
	opt = gf_modules_get_option((GF_BaseInterface *)plug, "DASH", "AutoSwitchCount");
	if (!opt) gf_modules_set_option((GF_BaseInterface *)plug, "DASH", "AutoSwitchCount", "0");
	if (opt) auto_switch_count = atoi(opt);

	keep_files = 0;
	opt = gf_modules_get_option((GF_BaseInterface *)plug, "DASH", "KeepFiles");
	if (!opt) gf_modules_set_option((GF_BaseInterface *)plug, "DASH", "KeepFiles", "no");
	if (opt && !strcmp(opt, "yes")) keep_files = 1;

	disable_switching = 0;
	opt = gf_modules_get_option((GF_BaseInterface *)plug, "DASH", "DisableSwitching");
	if (opt && !strcmp(opt, "yes")) disable_switching = 1;

	first_select_mode = 0;
	opt = gf_modules_get_option((GF_BaseInterface *)plug, "DASH", "StartRepresentation");
	if (!opt) {
		gf_modules_set_option((GF_BaseInterface *)plug, "DASH", "StartRepresentation", "minBandwidth");
		opt = "minBandwidth";
	}
	if (opt && !strcmp(opt, "maxBandwidth")) first_select_mode = GF_DASH_SELECT_BANDWIDTH_HIGHEST;
	else if (opt && !strcmp(opt, "minQuality")) first_select_mode = GF_DASH_SELECT_QUALITY_LOWEST;
	else if (opt && !strcmp(opt, "maxQuality")) first_select_mode = GF_DASH_SELECT_QUALITY_HIGHEST;
	else first_select_mode = GF_DASH_SELECT_BANDWIDTH_LOWEST;

	opt = gf_modules_get_option((GF_BaseInterface *)plug, "DASH", "MemoryStorage");
	if (!opt) gf_modules_set_option((GF_BaseInterface *)plug, "DASH", "MemoryStorage", "yes");
	mpdin->memory_storage = (opt && !strcmp(opt, "yes")) ? 1 : 0;

	opt = gf_modules_get_option((GF_BaseInterface *)plug, "DASH", "UseMaxResolution");
	if (!opt) {
#if defined(_WIN32_WCE) || defined(GPAC_ANDROID) || defined(GPAC_IPHONE)
		opt = "yes";
#else
		opt = "no";
#endif
		gf_modules_set_option((GF_BaseInterface *)plug, "DASH", "UseMaxResolution", opt);
	}
	mpdin->use_max_res = !strcmp(opt, "yes") ? 1 : 0;

	opt = gf_modules_get_option((GF_BaseInterface *)plug, "DASH", "ImmediateSwitching");
	if (!opt) gf_modules_set_option((GF_BaseInterface *)plug, "DASH", "ImmediateSwitching", "no");
	mpdin->immediate_switch = (opt && !strcmp(opt, "yes")) ? 1 : 0;

	opt = gf_modules_get_option((GF_BaseInterface *)plug, "DASH", "BufferingMode");
	if (!opt) gf_modules_set_option((GF_BaseInterface *)plug, "DASH", "BufferingMode", "minBuffer");

	if (opt && !strcmp(opt, "segments")) mpdin->buffer_mode = MPDIN_BUFFER_SEGMENTS;
	else if (opt && !strcmp(opt, "none")) mpdin->buffer_mode = MPDIN_BUFFER_NONE;
	else mpdin->buffer_mode = MPDIN_BUFFER_MIN;


	opt = gf_modules_get_option((GF_BaseInterface *)plug, "DASH", "LowLatency");
	if (!opt) gf_modules_set_option((GF_BaseInterface *)plug, "DASH", "LowLatency", "no");

	if (opt && !strcmp(opt, "chunk") ) mpdin->use_low_latency = 1;
	else if (opt && !strcmp(opt, "always") ) mpdin->use_low_latency = 2;
	else mpdin->use_low_latency = 0;

	opt = gf_modules_get_option((GF_BaseInterface *)plug, "DASH", "AllowAbort");
	if (!opt) gf_modules_set_option((GF_BaseInterface *)plug, "DASH", "AllowAbort", "no");
	mpdin->allow_http_abort = (opt && !strcmp(opt, "yes")) ? GF_TRUE : GF_FALSE;

	opt = gf_modules_get_option((GF_BaseInterface *)plug, "DASH", "ShiftClock");
	if (!opt) gf_modules_set_option((GF_BaseInterface *)plug, "DASH", "ShiftClock", "0");
	shift_utc_ms = opt ? atoi(opt) : 0;

	opt = gf_modules_get_option((GF_BaseInterface *)plug, "DASH", "UseServerUTC");
	if (!opt) gf_modules_set_option((GF_BaseInterface *)plug, "DASH", "UseServerUTC", "yes");
	use_server_utc = (opt && !strcmp(opt, "yes")) ? 1 : 0;

	mpdin->in_seek = 0;
	mpdin->previous_start_range = -1;

	init_timeshift = 0;
	opt = gf_modules_get_option((GF_BaseInterface *)plug, "DASH", "InitialTimeshift");
	if (!opt) gf_modules_set_option((GF_BaseInterface *)plug, "DASH", "InitialTimeshift", "0");
	if (opt) init_timeshift = atoi(opt);

	mpdin->dash = gf_dash_new(&mpdin->dash_io, max_cache_duration, auto_switch_count, keep_files, disable_switching, first_select_mode, (mpdin->buffer_mode == MPDIN_BUFFER_SEGMENTS) ? 1 : 0, init_timeshift);

	if (!mpdin->dash) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("[MPD_IN] Error - cannot create DASH Client for %s\n", url));
		gf_term_on_connect(mpdin->service, NULL, GF_IO_ERR);
		return GF_OK;
	}

	gf_dash_set_utc_shift(mpdin->dash, shift_utc_ms);
	gf_dash_enable_utc_drift_compensation(mpdin->dash, use_server_utc);

	opt = gf_modules_get_option((GF_BaseInterface *)plug, "DASH", "UseScreenResolution");
	//default mode is no for the time being
	if (!opt) gf_modules_set_option((GF_BaseInterface *)plug, "DASH", "UseScreenResolution", "no");
	if (!opt || !strcmp(opt, "yes")) {
		GF_NetworkCommand com;
		memset(&com, 0, sizeof(GF_NetworkCommand));
		com.base.command_type = GF_NET_SERVICE_MEDIA_CAP_QUERY;
		gf_term_on_command(serv, &com, GF_OK);

		com.mcaps.width = 1920;
		com.mcaps.height = 1080;
		if (com.mcaps.width && com.mcaps.height) {
			gf_dash_set_max_resolution(mpdin->dash, com.mcaps.width, com.mcaps.height, com.mcaps.display_bit_depth);
		}
	}

	opt = gf_modules_get_option((GF_BaseInterface *)plug, "DASH", "TimeBetween404");
	if (opt) {
		gf_dash_set_min_timeout_between_404(mpdin->dash, atoi(opt));
	}

	opt = gf_modules_get_option((GF_BaseInterface *)plug, "DASH", "SegmentExpirationThreshold");
	if (opt) {
		gf_dash_set_segment_expiration_threshold(mpdin->dash, atoi(opt));
	}


	opt = gf_modules_get_option((GF_BaseInterface *)plug, "DASH", "DebugAdaptationSet");
	if (!opt) gf_modules_set_option((GF_BaseInterface *)plug, "DASH", "DebugAdaptationSet", "-1");
	debug_adaptation_set = opt ? atoi(opt) : -1;

	gf_dash_debug_group(mpdin->dash, debug_adaptation_set);

	/*dash thread starts at the end of gf_dash_open */
	e = gf_dash_open(mpdin->dash, url);
	if (e) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("[MPD_IN] Error - cannot initialize DASH Client for %s: %s\n", url, gf_error_to_string(e) ));
		gf_term_on_connect(mpdin->service, NULL, e);
		return GF_OK;
	}
	return GF_OK;
}
Example #12
void RP_ProcessRTP(RTPStream *ch, char *pck, u32 size)
{
	GF_NetworkCommand com;
	GF_Err e;
	GF_RTPHeader hdr;
	u32 PayloadStart;
	ch->rtp_bytes += size;

	/*first decode RTP*/
	e = gf_rtp_decode_rtp(ch->rtp_ch, pck, size, &hdr, &PayloadStart);

	/*corrupted or NULL data*/
	if (e || (PayloadStart >= size)) {
		//gf_term_on_sl_packet(ch->owner->service, ch->channel, NULL, 0, NULL, GF_CORRUPTED_DATA);
		return;
	}

	/*if we must notify some timing, do it now. If the channel has no range, this should NEVER be called*/
	if (ch->check_rtp_time /*&& gf_rtp_is_active(ch->rtp_ch)*/) {
		Double ch_time;

		/*it may happen that we still receive packets from a previous "play" request. If this is the case,
		filter until we reach the indicated rtptime*/
		if (ch->rtp_ch->rtp_time
		        && (ch->rtp_ch->rtp_first_SN > hdr.SequenceNumber)
		        && (ch->rtp_ch->rtp_time < hdr.TimeStamp)
		   ) {
			GF_LOG(GF_LOG_WARNING, GF_LOG_RTP, ("[RTP] Rejecting too early packet (TS %d vs signaled rtp time %d - diff %d ms)\n",
			                                    hdr.TimeStamp, ch->rtp_ch->rtp_time, ((hdr.TimeStamp - ch->rtp_ch->rtp_time)*1000) / ch->rtp_ch->TimeScale));
			return;
		}

		ch_time = gf_rtp_get_current_time(ch->rtp_ch);

		/*this is the first packet on the channel (no PAUSE)*/
		if (ch->check_rtp_time == RTP_SET_TIME_RTP) {
			/*Note: in a SEEK with RTSP, the rtp-info time given by the server is
			the rtp time of the desired range. But the server may (and should) send from
			the previous I frame on video, so the time of the first rtp packet after
			a SEEK can actually be less than CurrentStart. We don't drop these
			packets, in order to show as much video as possible. We could drop them, but this would mean
			waiting for the next RAP...*/

			memset(&com, 0, sizeof(com));
			com.command_type = GF_NET_CHAN_MAP_TIME;
			com.base.on_channel = ch->channel;
			if (ch->rtsp) {
				com.map_time.media_time = ch->current_start + ch_time;
			} else {
				com.map_time.media_time = 0;
			}

			com.map_time.timestamp = hdr.TimeStamp;
			com.map_time.reset_buffers = 0;
			gf_term_on_command(ch->owner->service, &com, GF_OK);

			GF_LOG(GF_LOG_INFO, GF_LOG_RTP, ("[RTP] Mapping RTP Time seq %d TS %d Media Time %g - rtp info seq %d TS %d\n",
			                                 hdr.SequenceNumber, hdr.TimeStamp, com.map_time.media_time, ch->rtp_ch->rtp_first_SN, ch->rtp_ch->rtp_time
			                                ));

			/*skip RTCP clock init when RTSP is used*/
			if (ch->rtsp) ch->rtcp_init = 1;

//			if (ch->depacketizer->payt==GF_RTP_PAYT_H264_AVC) ch->depacketizer->flags |= GF_RTP_AVC_WAIT_RAP;
		}
		/*this is a RESUME on the channel, filter packets based on time (Darwin seems to send a
		couple of packets before).
		Do not dispatch if we're below ~20 ms or <0, because this means we already have
		this packet - the PAUSE is issued with the RTP currentTime*/
		else if (ch_time <= 0.021) {
			return;
		}
		ch->check_rtp_time = RTP_SET_TIME_NONE;
	}

	gf_rtp_depacketizer_process(ch->depacketizer, &hdr, pck + PayloadStart, size - PayloadStart);

	/*last check: signal EOS if we're close to the end range, in case the server does not send RTCP BYE*/
	if ((ch->flags & RTP_HAS_RANGE) && !(ch->flags & RTP_EOS) ) {
		/*also check last CTS*/
		Double ts = (Double) ((u32) ch->depacketizer->sl_hdr.compositionTimeStamp - hdr.TimeStamp);
		ts /= gf_rtp_get_clockrate(ch->rtp_ch);
		if (ABSDIFF(ch->range_end, (ts + ch->current_start + gf_rtp_get_current_time(ch->rtp_ch)) ) < 0.2) {
			ch->flags |= RTP_EOS;
			ch->stat_stop_time = gf_sys_clock();
			gf_term_on_sl_packet(ch->owner->service, ch->channel, NULL, 0, NULL, GF_EOS);
		}
	}
}
Example #13
static void M2TS_OnEvent(GF_M2TS_Demuxer *ts, u32 evt_type, void *param)
{
    GF_Event evt;
    M2TSIn *m2ts = (M2TSIn *) ts->user;

    switch (evt_type) {
    case GF_M2TS_EVT_PAT_UPDATE:
        /*	example code showing how to forward an event from MPEG-2 TS input service to GPAC user*/
#if 0
    {
        GF_Event evt;
        evt.type = GF_EVENT_FORWARDED;
        evt.forwarded_event.forward_type = GF_EVT_FORWARDED_MPEG2;
        evt.forwarded_event.service_event_type = evt_type;
        evt.forwarded_event.param = param;
        gf_term_on_service_event(m2ts->service, &evt);
    }
#endif
    break;
    case GF_M2TS_EVT_AIT_FOUND:
        evt.type = GF_EVENT_FORWARDED;
        evt.forwarded_event.forward_type = GF_EVT_FORWARDED_MPEG2;
        evt.forwarded_event.service_event_type = evt_type;
        evt.forwarded_event.param = param;
        gf_term_on_service_event(m2ts->service, &evt);
        break;
    case GF_M2TS_EVT_PAT_FOUND:
        /* In case the TS has a single program, wait for the PMT before sending connect, in case there is an IOD in the PMT */
        if (gf_list_count(m2ts->ts->programs) != 1) {
            gf_term_on_connect(m2ts->service, NULL, GF_OK);
            m2ts->is_connected = 1;
        }
        /* Send the TS to the user if needed. Useful to check the number of received programs*/
        evt.type = GF_EVENT_FORWARDED;
        evt.forwarded_event.forward_type = GF_M2TS_EVT_PAT_FOUND;
        evt.forwarded_event.service_event_type = evt_type;
        evt.forwarded_event.param = ts;
        gf_term_on_service_event(m2ts->service, &evt);
        break;
    case GF_M2TS_EVT_PMT_FOUND:
        if (gf_list_count(m2ts->ts->programs) == 1) {
            gf_term_on_connect(m2ts->service, NULL, GF_OK);
            m2ts->is_connected = 1;
        }

        /*do not declare if a single program was requested for playback*/
        MP2TS_SetupProgram(m2ts, param, m2ts->request_all_pids, m2ts->request_all_pids ? 0 : 1);
        M2TS_FlushRequested(m2ts);
        break;
    case GF_M2TS_EVT_PMT_REPEAT:
//	case GF_M2TS_EVT_PMT_UPDATE:
        M2TS_FlushRequested(m2ts);
        break;
    case GF_M2TS_EVT_SDT_REPEAT:
    case GF_M2TS_EVT_SDT_UPDATE:
    case GF_M2TS_EVT_SDT_FOUND:
        M2TS_FlushRequested(m2ts);
        break;
    case GF_M2TS_EVT_DVB_GENERAL:
        if (m2ts->eit_channel) {
            GF_M2TS_SL_PCK *pck = (GF_M2TS_SL_PCK *)param;
            gf_term_on_sl_packet(m2ts->service, m2ts->eit_channel, pck->data, pck->data_len, NULL, GF_OK);
        }
        break;
    case GF_M2TS_EVT_PES_PCK:
        MP2TS_SendPacket(m2ts, param);
        break;
    case GF_M2TS_EVT_SL_PCK:
        MP2TS_SendSLPacket(m2ts, param);
        break;
    case GF_M2TS_EVT_AAC_CFG:
    {
        GF_M2TS_PES_PCK *pck = (GF_M2TS_PES_PCK*)param;
        if (!pck->stream->first_dts) {
            gf_m2ts_set_pes_framing(pck->stream, GF_M2TS_PES_FRAMING_SKIP_NO_RESET);
            MP2TS_DeclareStream(m2ts, pck->stream, pck->data, pck->data_len);
            if (ts->file || ts->dnload) ts->file_regulate = 1;
            pck->stream->first_dts=1;
            /*force scene regeneration*/
            gf_term_add_media(m2ts->service, NULL, 0);
        }
    }
    break;
    case GF_M2TS_EVT_PES_PCR:
        /*send pcr*/
        if (((GF_M2TS_PES_PCK *) param)->stream && ((GF_M2TS_PES_PCK *) param)->stream->user) {
            GF_SLHeader slh;
            memset(&slh, 0, sizeof(GF_SLHeader) );
            slh.OCRflag = 1;
            slh.m2ts_pcr = ( ((GF_M2TS_PES_PCK *) param)->flags & GF_M2TS_PES_PCK_DISCONTINUITY) ? 2 : 1;
            slh.objectClockReference = ((GF_M2TS_PES_PCK *) param)->PTS;
            gf_term_on_sl_packet(m2ts->service, ((GF_M2TS_PES_PCK *) param)->stream->user, NULL, 0, &slh, GF_OK);
        }
        ((GF_M2TS_PES_PCK *) param)->stream->program->first_dts = 1;

        if ( ((GF_M2TS_PES_PCK *) param)->flags & GF_M2TS_PES_PCK_DISCONTINUITY) {
#if 0
            if (ts->pcr_last) {
                ts->pcr_last = ((GF_M2TS_PES_PCK *) param)->PTS;
                ts->stb_at_last_pcr = gf_sys_clock();
            }
#endif
            GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[M2TS In] PCR discontinuity - switching from old STB "LLD" to new one "LLD"\n", ts->pcr_last, ((GF_M2TS_PES_PCK *) param)->PTS));
            /*FIXME - we need to find a way to treat PCR discontinuities correctly while ignoring broken PCR discontinuities
            seen in many HLS solutions*/
            return;
        }

        if (ts->file_regulate) {
            u64 pcr = ((GF_M2TS_PES_PCK *) param)->PTS;
            u32 stb = gf_sys_clock();

            if (m2ts->regulation_pcr_pid==0) {
                /*we pick the first PCR PID for file regulation - we don't need to make sure this is the PCR of a program being played as we
                only check buffer levels, not DTS/PTS of the streams in the regulation step*/
                m2ts->regulation_pcr_pid = ((GF_M2TS_PES_PCK *) param)->stream->pid;
            } else if (m2ts->regulation_pcr_pid != ((GF_M2TS_PES_PCK *) param)->stream->pid) {
                return;
            }


            if (ts->pcr_last) {
                s32 diff;
                if (pcr < ts->pcr_last) {
                    GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[M2TS In] PCR "LLU" less than previous PCR "LLU"\n", ((GF_M2TS_PES_PCK *) param)->PTS, ts->pcr_last));
                    ts->pcr_last = pcr;
                    ts->stb_at_last_pcr = gf_sys_clock();
                    diff = 0;
                } else {
                    u64 pcr_diff = (pcr - ts->pcr_last);
                    pcr_diff /= 27000;
                    if (pcr_diff>1000) {
                        GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[M2TS In] PCR diff too big: "LLU" ms - PCR "LLU" - previous PCR "LLU" - error in TS ?\n", pcr_diff, ((GF_M2TS_PES_PCK *) param)->PTS, ts->pcr_last));
                        diff = 100;
                    } else {
                        diff = (u32) pcr_diff - (stb - ts->stb_at_last_pcr);
                    }
                }
                if (diff<0) {
                    GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[M2TS In] Demux not going fast enough according to PCR (drift %d, pcr: "LLU", last pcr: "LLU")\n", diff, pcr, ts->pcr_last));
                } else if (diff>0) {
                    u32 sleep_for=1;
#ifndef GPAC_DISABLE_LOG
                    u32 nb_sleep=0;
#endif
                    /*query buffer level, don't sleep if too low*/
                    GF_NetworkCommand com;
                    com.command_type = GF_NET_BUFFER_QUERY;
                    while (ts->run_state) {
                        gf_term_on_command(m2ts->service, &com, GF_OK);
                        if (com.buffer.occupancy < M2TS_BUFFER_MAX) {
                            GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[M2TS In] Demux not going to sleep: buffer occupancy %d ms\n", com.buffer.occupancy));
                            break;
                        }
                        /*We don't sleep for the entire buffer occupancy, because we would take
                        the risk of starving the audio chains. We try to keep buffers half full*/
#ifndef GPAC_DISABLE_LOG
                        if (!nb_sleep) {
                            GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[M2TS In] Demux going to sleep (buffer occupancy %d ms)\n", com.buffer.occupancy));
                        }
                        nb_sleep++;
#endif
                        gf_sleep(sleep_for);
                    }
#ifndef GPAC_DISABLE_LOG
                    if (nb_sleep) {
                        GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[M2TS In] Demux resume after %d ms - current buffer occupancy %d ms\n", sleep_for*nb_sleep, com.buffer.occupancy));
                    }
#endif
                    ts->nb_pck = 0;
                    ts->pcr_last = pcr;
                    ts->stb_at_last_pcr = gf_sys_clock();
                } else {
                    GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[M2TS In] Demux drift according to PCR (drift %d, pcr: "LLD", last pcr: "LLD")\n", diff, pcr, ts->pcr_last));
                }
            } else {
                ts->pcr_last = pcr;
                ts->stb_at_last_pcr = gf_sys_clock();
            }
        }
        break;
    case GF_M2TS_EVT_TDT:
        if (m2ts->hybrid_on) {
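            /*map the TDT wall-clock time (seconds since midnight) onto each program's last PCR value, once per program after a PCR has been received*/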
            u32 i, count;
            GF_M2TS_TDT_TOT *tdt = (GF_M2TS_TDT_TOT *)param;
            GF_NetworkCommand com;
            memset(&com, 0, sizeof(com));
            com.command_type = GF_NET_CHAN_MAP_TIME;
            com.map_time.media_time = tdt->hour*3600+tdt->minute*60+tdt->second;
            com.map_time.reset_buffers = 0;
            count = gf_list_count(ts->programs);
            for (i=0; i<count; i++) {
                GF_M2TS_Program *prog = gf_list_get(ts->programs, i);
                u32 j, count2;
                if (prog->tdt_found || !prog->last_pcr_value) /*map TDT one time, after we received a PCR*/
                    continue;
                prog->tdt_found = 1;
                count2 = gf_list_count(prog->streams);
                com.map_time.timestamp = prog->last_pcr_value/300;
                for (j=0; j<count2; j++) {
                    GF_M2TS_ES * stream = gf_list_get(prog->streams, j);
                    if (stream->user) {
                        com.base.on_channel = stream->user;
                        gf_term_on_command(m2ts->service, &com, GF_OK);
                    }
                }
                GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[M2TS In] Mapping TDT Time %04d/%02d/%02d %02d:%02d:%02d and PCR time "LLD" on program %d\n",
                                                       tdt->year, tdt->month, tdt->day, tdt->hour, tdt->minute, tdt->second, com.map_time.timestamp, prog->number));
            }
        }
        break;
    case GF_M2TS_EVT_TOT:
        break;
    }
}
Example #14
static void gf_rtp_switch_quality(RTPClient *rtp, Bool switch_up)
{
	u32 i,count;
	RTPStream *ch, *cur_ch;
	GF_NetworkCommand com;

	count = gf_list_count(rtp->channels);
	/*find the current stream*/
	ch = cur_ch = NULL;
	for (i = 0; i < count; i++) {
		cur_ch = (RTPStream *) gf_list_get(rtp->channels, i);
		if (cur_ch->mid != rtp->cur_mid) {
			cur_ch=NULL;
			continue;
		}
		break;
	}
	if (!cur_ch) return;

	if (switch_up)
	{
		/*this is the highest stream*/
		if (!cur_ch->next_stream)
		{
			cur_ch->status = RTP_Running;
			return;
		}
		else
		{
			for (i = 0; i < count; i++) {
				ch = (RTPStream *) gf_list_get(rtp->channels, i);
				if (ch->mid == cur_ch->next_stream)
				{
					/*resume streaming next channel*/
					gf_mx_p(rtp->mx);
					RP_InitStream(ch, 0);
					gf_mx_v(rtp->mx);
					ch->status = RTP_Running;
					rtp->cur_mid = ch->mid;
					break;
				}

			}
		}
	}
	else
	{
		/*this is the lowest stream, i.e. the base layer*/
		if (!cur_ch->prev_stream)
		{
			cur_ch->status = RTP_Running;
			return;
		}
		else
		{
			for (i = 0; i < count; i++) {
				ch = (RTPStream *) gf_list_get(rtp->channels, i);
				if (ch->mid == cur_ch->prev_stream)
				{
					/*stop streaming current channel*/
					gf_rtp_stop(cur_ch->rtp_ch);
					cur_ch->status = RTP_Connected;
					com.command_type = GF_NET_CHAN_RESET;
					com.base.on_channel = cur_ch->channel;
					gf_term_on_command(rtp->service, &com, GF_OK);
					rtp->cur_mid = ch->mid;
					break;
				}
			}
		}
	}
	GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("Switch from ES%d to ES %d\n", cur_ch->mid, ch->mid));
	return;
}
Example #15
void MP3_NetIO(void *cbk, GF_NETIO_Parameter *param)
{
	GF_Err e;
	const char *szCache;
	u32 total_size, bytes_done;
	MP3Reader *read = (MP3Reader *) cbk;

	e = param->error;
	/*done*/
	if (param->msg_type==GF_NETIO_DATA_TRANSFERED) {
		if (read->stream) {
			read->is_remote = 0;
			e = GF_EOS;
		} else {
			return;
		}
	}
	else if (param->msg_type==GF_NETIO_PARSE_HEADER) {
		if (!strcmp(param->name, "icy-name")) {
			if (read->icy_name) gf_free(read->icy_name);
			read->icy_name = gf_strdup(param->value);
		}
		if (!strcmp(param->name, "icy-genre")) {
			if (read->icy_genre) gf_free(read->icy_genre);
			read->icy_genre = gf_strdup(param->value);
		}
		if (!strcmp(param->name, "icy-meta")) {
			GF_NetworkCommand com;
			char *meta;
			meta = param->value;
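			/*icy-meta is a ';'-separated list of key=value pairs - extract the StreamTitle entry*/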
			while (meta && meta[0]) {
				char *sep = strchr(meta, ';');
				if (sep) sep[0] = 0;

				if (!strnicmp(meta, "StreamTitle=", 12)) {
					if (read->icy_track_name) gf_free(read->icy_track_name);
					read->icy_track_name = gf_strdup(meta+12);
					if (!strcmp(read->icy_track_name, "''")) {
						/* On some servers, '' means no track name */
						gf_free(read->icy_track_name);
						read->icy_track_name = NULL;
					}
				}
				if (!sep) break;
				sep[0] = ';';
				meta = sep+1;
			}

			com.base.command_type = GF_NET_SERVICE_INFO;
			gf_term_on_command(read->service, &com, GF_OK);
		}
		return;
	} else {
		/*handle service message*/
		gf_term_download_update_stats(read->dnload);
		if (param->msg_type!=GF_NETIO_DATA_EXCHANGE) return;
	}

	if (e >= GF_OK) {
		if (read->needs_connection) {
			gf_dm_sess_get_stats(read->dnload, NULL, NULL, &total_size, NULL, NULL, NULL);
			if (!total_size) read->is_live = 1;
		}
		/*looks like a live stream*/
		if (read->is_live) {
		  if (read->liveDataCopySize < param->size){
		      read->liveDataCopy = gf_realloc(read->liveDataCopy, param->size);
		  }
		  memcpy(read->liveDataCopy, param->data, param->size);
		  if (!e) MP3_OnLiveData(read, read->liveDataCopy, param->size);
		  return;
		}

		if (read->stream) return;

		/*open service*/
		szCache = gf_dm_sess_get_cache_name(read->dnload);
		if (!szCache) e = GF_IO_ERR;
		else {
			read->stream = gf_f64_open((char *) szCache, "rb");
			if (!read->stream) e = GF_SERVICE_ERROR;
			else {
				u32 minSizeToRead = 0;
				/*if the whole file is already in the cache, parse the duration*/
				if (e==GF_EOS) read->is_remote = 0;
				e = GF_OK;
				/*not enough data*/
				if (!MP3_ConfigureFromFile(read, &minSizeToRead)) {
					gf_dm_sess_get_stats(read->dnload, NULL, NULL, NULL, &bytes_done, NULL, NULL);
					/*bad data - there's likely some ID3 around...*/
					if (bytes_done>(100*1024 + minSizeToRead)) {
						e = GF_CORRUPTED_DATA;
					} else {
						fclose(read->stream);
						read->stream = NULL;
						return;
					}
				}
			}
		}
	}

	/*OK confirm*/
	if (read->needs_connection) {
		read->needs_connection = 0;
		gf_term_on_connect(read->service, NULL, e);
		if (!e) mp3_setup_object(read);
	}
}
Example #16
void RP_ProcessRTCP(RTPStream *ch, char *pck, u32 size)
{
	Bool has_sr;
	GF_Err e;

	if (ch->status == RTP_Connected) return;

	ch->rtcp_bytes += size;

	e = gf_rtp_decode_rtcp(ch->rtp_ch, pck, size, &has_sr);
	if (e<0) return;

	/*update sync if on pure RTP*/
	if (!ch->rtcp_init && has_sr) {
		Double ntp_clock;

		ntp_clock = ch->rtp_ch->last_SR_NTP_sec;
		ntp_clock += ((Double)ch->rtp_ch->last_SR_NTP_frac)/0xFFFFFFFF;

		if (!ch->owner->last_ntp) {
			//add safety in case this RTCP report is received before another report
			//that was supposed to come in earlier (with earlier NTP)
			//Double safety_offset, time = ch->rtp_ch->last_SR_rtp_time;
			//time /= ch->rtp_ch->TimeScale;
			//safety_offset = time/2;
			ch->owner->last_ntp = ntp_clock;
		}

		if (ntp_clock >= ch->owner->last_ntp) {
			ntp_clock -= ch->owner->last_ntp;
		} else {
			ntp_clock = 0;
		}

		//assert(ch->rtp_ch->last_SR_rtp_time >= (u64) (ntp_clock * ch->rtp_ch->TimeScale));
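		/*derive the RTP timestamp offset that maps this stream's RTP time onto the shared NTP reference*/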
		ch->ts_offset = ch->rtp_ch->last_SR_rtp_time;
		ch->ts_offset -= (s64) (ntp_clock * ch->rtp_ch->TimeScale);

#if 0
		GF_NetworkCommand com;
		memset(&com, 0, sizeof(com));
		com.command_type = GF_NET_CHAN_MAP_TIME;
		com.base.on_channel = ch->channel;
		com.map_time.media_time = ntp;

		if (com.map_time.media_time >= ch->owner->last_ntp) {
			com.map_time.media_time -= ch->owner->last_ntp;
		} else {
			com.map_time.media_time = 0;
		}
		com.map_time.timestamp = ch->rtp_ch->last_SR_rtp_time;
		com.map_time.reset_buffers = 1;
		gf_term_on_command(ch->owner->service, &com, GF_OK);
#endif

		GF_LOG(GF_LOG_INFO, GF_LOG_RTP, ("[RTCP] At %d Using Sender Report to map RTP TS %d to NTP clock %g - new TS offset "LLD" \n",
		                                 gf_sys_clock(), ch->rtp_ch->last_SR_rtp_time, ntp_clock, ch->ts_offset
		                                ));

		ch->rtcp_init = 1;
		ch->check_rtp_time = RTP_SET_TIME_NONE;
	}

	if (e == GF_EOS) {
		ch->flags |= RTP_EOS;
		ch->stat_stop_time = gf_sys_clock();
		gf_term_on_sl_packet(ch->owner->service, ch->channel, NULL, 0, NULL, GF_EOS);
	}
}
Example #17
static GF_Err MPD_ClientQuery(GF_InputService *ifce, GF_NetworkCommand *param)
{
	u32 i;
	GF_MPD_In *mpdin = (GF_MPD_In *) ifce->proxy_udta;
    if (!param || !ifce || !ifce->proxy_udta) return GF_BAD_PARAM;

	/*gets byte range of init segment (for local validation)*/
	if (param->command_type==GF_NET_SERVICE_QUERY_INIT_RANGE) {
		param->url_query.next_url = NULL;
		param->url_query.start_range = 0;
		param->url_query.end_range = 0;

		mpdin->in_seek = 0;

		for (i=0; i<gf_dash_get_group_count(mpdin->dash); i++) {
			GF_MPDGroup *group;
			if (!gf_dash_is_group_selected(mpdin->dash, i)) continue;
			group = gf_dash_get_group_udta(mpdin->dash, i);
			if (group->segment_ifce == ifce) {
				gf_dash_group_get_segment_init_url(mpdin->dash, i, &param->url_query.start_range, &param->url_query.end_range);
				return GF_OK;
			}
		}
		return GF_SERVICE_ERROR;
	}

	/*gets URL and byte range of next segment - if needed, adds bitstream switching segment info*/
	if (param->command_type==GF_NET_SERVICE_QUERY_NEXT) {
		Bool group_done;
		u32 nb_segments_cached;
		u32 group_idx=0;
		GF_MPDGroup *group=NULL;
		const char *src_url;
		Bool discard_first_cache_entry = 1;
        u32 timer = gf_sys_clock();
		GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[MPD_IN] Received Service Query Next request from input service %s\n", ifce->module_name));


		param->url_query.discontinuity_type = 0;
		if (mpdin->in_seek) {
			mpdin->in_seek = 0;
			param->url_query.discontinuity_type = 2;
			discard_first_cache_entry = 0;
		}
		for (i=0; i<gf_dash_get_group_count(mpdin->dash); i++) {
			if (!gf_dash_is_group_selected(mpdin->dash, i)) continue;
			group = gf_dash_get_group_udta(mpdin->dash, i);
			if (group->segment_ifce == ifce) {
				group_idx = i;
				break;
			}
			group=NULL;
		}

		if (!group) {
			return GF_SERVICE_ERROR;
		}

		while (gf_dash_is_running(mpdin->dash) ) {
			group_done=0;
			nb_segments_cached = gf_dash_group_get_num_segments_ready(mpdin->dash, group_idx, &group_done);
			if (nb_segments_cached>=2)
				break;

			if (group_done) {
				if (!gf_dash_get_period_switch_status(mpdin->dash) && !gf_dash_in_last_period(mpdin->dash) ) {
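					/*last segment of the period: wait for the playback buffer to drain before requesting the switch to the next period*/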
					GF_NetworkCommand com;
					memset(&com, 0, sizeof(GF_NetworkCommand));
					com.command_type = GF_NET_BUFFER_QUERY;
					while (gf_dash_get_period_switch_status(mpdin->dash) != 1) {
						gf_term_on_command(mpdin->service, &com, GF_OK);
						if (!com.buffer.occupancy) {
							gf_dash_request_period_switch(mpdin->dash);
							break;
						}
						gf_sleep(20);
					}
				}
				return GF_EOS;
			}
            gf_sleep(16);
        }

		nb_segments_cached = gf_dash_group_get_num_segments_ready(mpdin->dash, group_idx, &group_done);
        if (nb_segments_cached < 2) {
            GF_LOG(GF_LOG_INFO, GF_LOG_DASH, ("[MPD_IN] No more file in cache, EOS\n"));
            return GF_EOS;
        } else {
            GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[MPD_IN] Had to wait for %u ms for the only cache file to be downloaded\n", (gf_sys_clock() - timer)));
        }

		if (discard_first_cache_entry) {
			gf_dash_group_discard_segment(mpdin->dash, group_idx);
		}

		gf_dash_group_get_next_segment_location(mpdin->dash, group_idx, &param->url_query.next_url, &param->url_query.start_range, &param->url_query.end_range,
								&param->url_query.next_url_init_or_switch_segment, &param->url_query.switch_start_range , &param->url_query.switch_end_range,
								&src_url);

        {
            u32 timer2 = gf_sys_clock() - timer ;
            if (timer2 > 1000) {
                GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[MPD_IN] Waiting for download to end took a long time : %u ms\n", timer2));
            }
			if (param->url_query.end_range) {
				GF_LOG(GF_LOG_INFO, GF_LOG_DASH, ("[MPD_IN] Switching segment playback to %s (Media Range: "LLD"-"LLD")\n", src_url, param->url_query.start_range, param->url_query.end_range));
			} else {
	            GF_LOG(GF_LOG_INFO, GF_LOG_DASH, ("[MPD_IN] Switching segment playback to %s\n", src_url));
			}
            GF_LOG(GF_LOG_INFO, GF_LOG_DASH, ("[MPD_IN] segment start time %g sec\n", gf_dash_group_current_segment_start_time(mpdin->dash, group_idx) ));

			GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[MPD_IN] Waited %d ms - Elements in cache: %u/%u\n\tCache file name %s\n", timer2, gf_dash_group_get_num_segments_ready(mpdin->dash, group_idx, &group_done), gf_dash_group_get_max_segments_in_cache(mpdin->dash, group_idx), param->url_query.next_url ));
        }
	    return GF_OK;
    }


    return GF_OK;
}
Example #18
static GF_Err MPD_ClientQuery(GF_InputService *ifce, GF_NetworkCommand *param)
{
	u32 i;
	GF_Err e;
	GF_MPD_In *mpdin = (GF_MPD_In *) ifce->proxy_udta;
	if (!param || !ifce || !ifce->proxy_udta) return GF_BAD_PARAM;

	/*gets byte range of init segment (for local validation)*/
	if (param->command_type==GF_NET_SERVICE_QUERY_INIT_RANGE) {
		param->url_query.next_url = NULL;
		param->url_query.start_range = 0;
		param->url_query.end_range = 0;

		mpdin->in_seek = 0;

		for (i=0; i<gf_dash_get_group_count(mpdin->dash); i++) {
			GF_MPDGroup *group;
			if (!gf_dash_is_group_selectable(mpdin->dash, i)) continue;
			group = gf_dash_get_group_udta(mpdin->dash, i);
			if (group->segment_ifce == ifce) {
				gf_dash_group_get_segment_init_url(mpdin->dash, i, &param->url_query.start_range, &param->url_query.end_range);
				param->url_query.current_download = 0;
				return GF_OK;
			}
		}
		return GF_SERVICE_ERROR;
	}

	/*gets URL and byte range of next segment - if needed, adds bitstream switching segment info*/
	if (param->command_type==GF_NET_SERVICE_QUERY_NEXT) {
		Bool group_done;
		u32 nb_segments_cached;
		u32 group_idx=0;
		GF_MPDGroup *group=NULL;
		const char *src_url;
		Bool discard_first_cache_entry = param->url_query.drop_first_segment;
		Bool check_current_download = param->url_query.current_download;
		u32 timer = gf_sys_clock();
		GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[MPD_IN] Received Service Query Next request from input service %s\n", ifce->module_name));


		param->url_query.current_download = 0;
		param->url_query.discontinuity_type = 0;
		if (mpdin->in_seek) {
			mpdin->in_seek = 0;
			param->url_query.discontinuity_type = 2;
			discard_first_cache_entry = 0;
		}

		for (i=0; i<gf_dash_get_group_count(mpdin->dash); i++) {
			if (!gf_dash_is_group_selected(mpdin->dash, i)) continue;
			group = gf_dash_get_group_udta(mpdin->dash, i);
			if (group->segment_ifce == ifce) {
				group_idx = i;
				break;
			}
			group=NULL;
		}

		if (!group) {
			return GF_SERVICE_ERROR;
		}
		//update group idx
		if (group->idx != group_idx) {
			group->idx = group_idx;
			GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[MPD_IN] New AdaptationSet detected after MPD update ?\n"));
		}

		if (discard_first_cache_entry) {
			GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[MPD_IN] Discarding first segment in cache\n"));
			gf_dash_group_discard_segment(mpdin->dash, group_idx);
		}

		while (gf_dash_is_running(mpdin->dash) ) {
			group_done=0;
			nb_segments_cached = gf_dash_group_get_num_segments_ready(mpdin->dash, group_idx, &group_done);
			if (nb_segments_cached>=1)
				break;

			if (group_done) {
				if (!gf_dash_get_period_switch_status(mpdin->dash) && !gf_dash_in_last_period(mpdin->dash) ) {
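					/*last segment of the period: wait for the playback buffer to drain before requesting the switch to the next period*/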
					GF_NetworkCommand com;
					memset(&com, 0, sizeof(GF_NetworkCommand));
					com.command_type = GF_NET_BUFFER_QUERY;
					while (gf_dash_get_period_switch_status(mpdin->dash) != 1) {
						gf_term_on_command(mpdin->service, &com, GF_OK);
						if (!com.buffer.occupancy) {
							gf_dash_request_period_switch(mpdin->dash);
							break;
						}
						gf_sleep(30);
					}
				}
				return GF_EOS;
			}

			if (check_current_download && mpdin->use_low_latency) {
				Bool is_switched=GF_FALSE;
				gf_dash_group_probe_current_download_segment_location(mpdin->dash, group_idx, &param->url_query.next_url, NULL, &param->url_query.next_url_init_or_switch_segment, &src_url, &is_switched);

				if (param->url_query.next_url) {
					param->url_query.current_download = 1;
					param->url_query.has_new_data = group->has_new_data;
					param->url_query.discontinuity_type = is_switched ? 1 : 0;
					if (gf_dash_group_loop_detected(mpdin->dash, group_idx))
						param->url_query.discontinuity_type = 2;
					group->has_new_data = 0;
					return GF_OK;
				}
				return GF_BUFFER_TOO_SMALL;
			}
			return GF_BUFFER_TOO_SMALL;
		}

		param->url_query.current_download = 0;
		nb_segments_cached = gf_dash_group_get_num_segments_ready(mpdin->dash, group_idx, &group_done);
		if (nb_segments_cached < 1) {
			GF_LOG(GF_LOG_INFO, GF_LOG_DASH, ("[MPD_IN] No more file in cache, EOS\n"));
			return GF_EOS;
		} else {
			GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[MPD_IN] Had to wait for %u ms for the only cache file to be downloaded\n", (gf_sys_clock() - timer)));
		}

		e = gf_dash_group_get_next_segment_location(mpdin->dash, group_idx, param->url_query.dependent_representation_index, &param->url_query.next_url, &param->url_query.start_range, &param->url_query.end_range,
		        NULL, &param->url_query.next_url_init_or_switch_segment, &param->url_query.switch_start_range , &param->url_query.switch_end_range,
		        &src_url, &param->url_query.has_next);
		if (e)
			return e;

		if (gf_dash_group_loop_detected(mpdin->dash, group_idx))
			param->url_query.discontinuity_type = 2;


#ifndef GPAC_DISABLE_LOG
		{
			u32 timer2 = gf_sys_clock() - timer ;
			if (timer2 > 1000) {
				GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[MPD_IN] Waiting for download to end took a long time : %u ms\n", timer2));
			}
			if (param->url_query.end_range) {
				GF_LOG(GF_LOG_INFO, GF_LOG_DASH, ("[MPD_IN] Next Segment is %s bytes "LLD"-"LLD"\n", src_url, param->url_query.start_range, param->url_query.end_range));
			} else {
				GF_LOG(GF_LOG_INFO, GF_LOG_DASH, ("[MPD_IN] Next Segment is %s\n", src_url));
			}
			GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[MPD_IN] Waited %d ms - Elements in cache: %u/%u\n\tCache file name %s\n\tsegment start time %g sec\n", timer2, gf_dash_group_get_num_segments_ready(mpdin->dash, group_idx, &group_done), gf_dash_group_get_max_segments_in_cache(mpdin->dash, group_idx), param->url_query.next_url, gf_dash_group_current_segment_start_time(mpdin->dash, group_idx)  ));
		}
#endif
	}


	return GF_OK;
}