Example #1
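Callback invoked for each depacketized SL packet on an RTP stream: it waits for RTCP synchronization (forcing playback after RTCP_DEFAULT_TIMEOUT_MS), applies the RTCP-derived timestamp offset, flags packet loss as GF_REMOTE_SERVICE_ERROR, honours the owner's packet-drop test settings, and forwards the payload with gf_service_send_packet before restoring the original timestamps.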
static void rtp_sl_packet_cbk(void *udta, char *payload, u32 size, GF_SLHeader *hdr, GF_Err e)
{
	u64 cts, dts;
	RTPStream *ch = (RTPStream *)udta;

	if (!ch->rtcp_init) {
		if (!ch->rtcp_check_start) {
			ch->rtcp_check_start = gf_sys_clock();
			return;
		}
		else if (gf_sys_clock() - ch->rtcp_check_start <= RTCP_DEFAULT_TIMEOUT_MS) {
			return;
		}
		GF_LOG(GF_LOG_WARNING, GF_LOG_RTP, ("[RTP] Timeout for RTCP: no SR received after %d ms - forcing playback, sync may be broken\n", RTCP_DEFAULT_TIMEOUT_MS));
		ch->rtcp_init = 1;
	}
	cts = hdr->compositionTimeStamp;
	dts = hdr->decodingTimeStamp;
	hdr->compositionTimeStamp -= ch->ts_offset;
	hdr->decodingTimeStamp -= ch->ts_offset;

	if (ch->rtp_ch->packet_loss) e = GF_REMOTE_SERVICE_ERROR;

	if (ch->owner->first_packet_drop && (hdr->packetSequenceNumber >= ch->owner->first_packet_drop) ) {
		if ( (hdr->packetSequenceNumber - ch->owner->first_packet_drop) % ch->owner->frequency_drop)
			gf_service_send_packet(ch->owner->service, ch->channel, payload, size, hdr, e);
	} else {
		gf_service_send_packet(ch->owner->service, ch->channel, payload, size, hdr, e);
	}
	hdr->compositionTimeStamp = cts;
	hdr->decodingTimeStamp = dts;
}
Example #2
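Signals end of file on an OGG reader by sending an empty GF_EOS packet on every declared stream channel.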
void OGG_EndOfFile(OGGReader *read)
{
	OGGStream *st;
	u32 i=0;
	while ((st = (OGGStream *)gf_list_enum(read->streams, &i))) {
		gf_service_send_packet(read->service, st->ch, NULL, 0, NULL, GF_EOS);
	}
}
Example #3
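Live AC-3 input: appends incoming bytes to an internal buffer, uses the first valid frame header to acknowledge the connection and set up the object, then sends every complete AC-3 frame as one access unit (CTS incremented by 1536 per frame) and keeps any trailing partial frame for the next call.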
static void AC3_OnLiveData(AC3Reader *read, const char *data, u32 data_size)
{
	u64 pos;
	Bool sync;
	GF_BitStream *bs;
	GF_AC3Header hdr;

	memset(&hdr, 0, sizeof(GF_AC3Header));

	read->data = gf_realloc(read->data, sizeof(char)*(read->data_size+data_size) );
	memcpy(read->data + read->data_size, data, sizeof(char)*data_size);
	read->data_size += data_size;

	if (read->needs_connection) {
		read->needs_connection = 0;
		bs = gf_bs_new((char *) read->data, read->data_size, GF_BITSTREAM_READ);
		sync = gf_ac3_parser_bs(bs, &hdr, GF_TRUE);
		gf_bs_del(bs);
		if (!sync) return;
		read->nb_ch = hdr.channels;
		read->sample_rate = hdr.sample_rate;
		read->is_live = 1;
		memset(&read->sl_hdr, 0, sizeof(GF_SLHeader));
		gf_service_connect_ack(read->service, NULL, GF_OK);
		AC3_SetupObject(read);
	}
	if (!read->ch) return;

	/*need a full ac3 header*/
	if (read->data_size<=7) return;

	bs = gf_bs_new((char *) read->data, read->data_size, GF_BITSTREAM_READ);
	hdr.framesize = 0;
	pos = 0;
	while (gf_ac3_parser_bs(bs, &hdr, GF_FALSE)) {
		pos = gf_bs_get_position(bs);
		read->sl_hdr.accessUnitStartFlag = 1;
		read->sl_hdr.accessUnitEndFlag = 1;
		read->sl_hdr.AU_sequenceNumber++;
		read->sl_hdr.compositionTimeStampFlag = 1;
		read->sl_hdr.compositionTimeStamp += 1536;
		gf_service_send_packet(read->service, read->ch, (char *) read->data + pos, hdr.framesize, &read->sl_hdr, GF_OK);
		gf_bs_skip_bytes(bs, hdr.framesize);
	}

	pos = gf_bs_get_position(bs);
	gf_bs_del(bs);

	if (pos) {
		u8 *d;
		read->data_size -= (u32) pos;
		d = gf_malloc(sizeof(char) * read->data_size);
		memcpy(d, read->data + pos, sizeof(char) * read->data_size);
		gf_free(read->data);
		read->data = d;
	}
	AC3_RegulateDataRate(read);
}
Example #4
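Processes an incoming RTCP packet: the first Sender Report is used to map the stream's RTP timestamps to the shared NTP clock (computing ts_offset), and a BYE (reported as GF_EOS by gf_rtp_decode_rtcp) marks the stream as finished and sends an EOS packet on the channel.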
void RP_ProcessRTCP(RTPStream *ch, char *pck, u32 size)
{
	Bool has_sr;
	GF_Err e;

	if (ch->status == RTP_Connected) return;

	ch->rtcp_bytes += size;

	e = gf_rtp_decode_rtcp(ch->rtp_ch, pck, size, &has_sr);
	if (e<0) return;

	/*update sync if on pure RTP*/
	if (!ch->rtcp_init && has_sr) {
		Double ntp_clock;

		ntp_clock = ch->rtp_ch->last_SR_NTP_sec;
		ntp_clock += ((Double)ch->rtp_ch->last_SR_NTP_frac)/0xFFFFFFFF;

		if (!ch->owner->last_ntp) {
			//add safety in case this RTCP report is received before another report
			//that was supposed to come in earlier (with earlier NTP)
			//Double safety_offset, time = ch->rtp_ch->last_SR_rtp_time;
			//time /= ch->rtp_ch->TimeScale;
			//safety_offset = time/2;
			ch->owner->last_ntp = ntp_clock;
		}

		if (ntp_clock >= ch->owner->last_ntp) {
			ntp_clock -= ch->owner->last_ntp;
		} else {
			ntp_clock = 0;
		}

		//assert(ch->rtp_ch->last_SR_rtp_time >= (u64) (ntp_clock * ch->rtp_ch->TimeScale));
		ch->ts_offset = ch->rtp_ch->last_SR_rtp_time;
		ch->ts_offset -= (s64) (ntp_clock * ch->rtp_ch->TimeScale);


		GF_LOG(GF_LOG_INFO, GF_LOG_RTP, ("[RTCP] At %d Using Sender Report to map RTP TS %d to NTP clock %g - new TS offset "LLD" \n",
		                                 gf_sys_clock(), ch->rtp_ch->last_SR_rtp_time, ntp_clock, ch->ts_offset
		                                ));

		ch->rtcp_init = 1;
		ch->check_rtp_time = RTP_SET_TIME_NONE;
	}

	if (e == GF_EOS) {
		ch->flags |= RTP_EOS;
		ch->stat_stop_time = gf_sys_clock();
		gf_service_send_packet(ch->owner->service, ch->channel, NULL, 0, NULL, GF_EOS);
	}
}
Example #5
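Demuxer thread built on libavformat: reads packets with av_read_frame, rescales PTS/DTS to each channel's timescale, dispatches audio and video packets to their channels, throttles on buffer occupancy via GF_NET_BUFFER_QUERY, and sends GF_EOS on both channels when reading stops.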
static u32 FFDemux_Run(void *par)
{
	AVPacket pkt;
	s64 seek_to;
	GF_NetworkCommand com;
	GF_NetworkCommand map;
	GF_SLHeader slh;
	FFDemux *ffd = (FFDemux *) par;

	memset(&map, 0, sizeof(GF_NetworkCommand));
	map.command_type = GF_NET_CHAN_MAP_TIME;

	memset(&com, 0, sizeof(GF_NetworkCommand));
	com.command_type = GF_NET_BUFFER_QUERY;

	memset(&slh, 0, sizeof(GF_SLHeader));

	slh.compositionTimeStampFlag = slh.decodingTimeStampFlag = 1;

	while (ffd->is_running) {
		//nothing connected, wait
		if (!ffd->video_ch && !ffd->audio_ch) {
			gf_sleep(100);
			continue;
		}

		if ((ffd->seek_time>=0) && ffd->seekable) {
			seek_to = (s64) (AV_TIME_BASE*ffd->seek_time);
			av_seek_frame(ffd->ctx, -1, seek_to, AVSEEK_FLAG_BACKWARD);
			ffd->seek_time = -1;
		}
		pkt.stream_index = -1;
		/*EOF*/
		if (av_read_frame(ffd->ctx, &pkt) <0) break;
		if (pkt.pts == AV_NOPTS_VALUE) pkt.pts = pkt.dts;
		if (!pkt.dts) pkt.dts = pkt.pts;

		slh.compositionTimeStamp = pkt.pts;
		slh.decodingTimeStamp = pkt.dts;

		gf_mx_p(ffd->mx);
		/*blindly send audio as soon as video is initialized*/
		if (ffd->audio_ch && (pkt.stream_index == ffd->audio_st) ) {
			slh.compositionTimeStamp *= ffd->audio_tscale.num;
			slh.decodingTimeStamp *= ffd->audio_tscale.num;

			gf_service_send_packet(ffd->service, ffd->audio_ch, (char *) pkt.data, pkt.size, &slh, GF_OK);
		}
		else if (ffd->video_ch && (pkt.stream_index == ffd->video_st)) {
			slh.compositionTimeStamp *= ffd->video_tscale.num;
			slh.decodingTimeStamp *= ffd->video_tscale.num;
			slh.randomAccessPointFlag = pkt.flags&AV_PKT_FLAG_KEY ? 1 : 0;
			gf_service_send_packet(ffd->service, ffd->video_ch, (char *) pkt.data, pkt.size, &slh, GF_OK);
		}
		gf_mx_v(ffd->mx);
		av_free_packet(&pkt);

		/*sleep until the buffer occupancy falls below the max - note that this works because all streams in this
		demuxer are synchronized*/
		while (ffd->audio_run || ffd->video_run) {
			gf_service_command(ffd->service, &com, GF_OK);
			if (com.buffer.occupancy < com.buffer.max)
				break;

			gf_sleep(1);
		}

		if (!ffd->audio_run && !ffd->video_run) break;
	}
	/*signal EOS*/
	if (ffd->audio_ch) gf_service_send_packet(ffd->service, ffd->audio_ch, NULL, 0, NULL, GF_EOS);
	if (ffd->video_ch) gf_service_send_packet(ffd->service, ffd->video_ch, NULL, 0, NULL, GF_EOS);
	ffd->is_running = 2;

	return 0;
}
Example #6
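Processes an incoming RTP packet: decodes the RTP header, maps the first packet's timestamp to media time with GF_NET_CHAN_MAP_TIME, filters stale packets after a seek or resume, hands the payload to the depacketizer, and signals EOS when the end of the play range is reached in case no RTCP BYE arrives.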
void RP_ProcessRTP(RTPStream *ch, char *pck, u32 size)
{
	GF_NetworkCommand com;
	GF_Err e;
	GF_RTPHeader hdr;
	u32 PayloadStart;
	ch->rtp_bytes += size;

	/*first decode RTP*/
	e = gf_rtp_decode_rtp(ch->rtp_ch, pck, size, &hdr, &PayloadStart);

	/*corrupted or NULL data*/
	if (e || (PayloadStart >= size)) {
		//gf_service_send_packet(ch->owner->service, ch->channel, NULL, 0, NULL, GF_CORRUPTED_DATA);
		return;
	}

	/*if we must notify some timing, do it now. If the channel has no range, this should NEVER be called*/
	if (ch->check_rtp_time /*&& gf_rtp_is_active(ch->rtp_ch)*/) {
		Double ch_time;

		/*it may happen that we still receive packets from a previous "play" request. If this is the case,
		filter until we reach the indicated rtptime*/
		if (ch->rtp_ch->rtp_time
		        && (ch->rtp_ch->rtp_first_SN > hdr.SequenceNumber)
		        && (ch->rtp_ch->rtp_time < hdr.TimeStamp)
		   ) {
			GF_LOG(GF_LOG_WARNING, GF_LOG_RTP, ("[RTP] Rejecting too early packet (TS %d vs signaled rtp time %d - diff %d ms)\n",
			                                    hdr.TimeStamp, ch->rtp_ch->rtp_time, ((hdr.TimeStamp - ch->rtp_ch->rtp_time)*1000) / ch->rtp_ch->TimeScale));
			return;
		}

		ch_time = gf_rtp_get_current_time(ch->rtp_ch);

		/*this is the first packet on the channel (no PAUSE)*/
		if (ch->check_rtp_time == RTP_SET_TIME_RTP) {
			/*Note: in a SEEK with RTSP, the rtp-info time given by the server is
			the rtp time of the desired range. But the server may (and should) send from
			the previous I-frame on video, so the time of the first rtp packet after
			a SEEK can actually be less than CurrentStart. We don't drop these
			packets in order to show as much video as possible. We could drop them, but that
			would mean waiting for the next RAP...*/

			memset(&com, 0, sizeof(com));
			com.command_type = GF_NET_CHAN_MAP_TIME;
			com.base.on_channel = ch->channel;
			if (ch->rtsp) {
				com.map_time.media_time = ch->current_start + ch_time;
			} else {
				com.map_time.media_time = 0;
			}

			com.map_time.timestamp = hdr.TimeStamp;
			com.map_time.reset_buffers = 0;
			gf_service_command(ch->owner->service, &com, GF_OK);

			GF_LOG(GF_LOG_INFO, GF_LOG_RTP, ("[RTP] Mapping RTP Time seq %d TS %d Media Time %g - rtp info seq %d TS %d\n",
			                                 hdr.SequenceNumber, hdr.TimeStamp, com.map_time.media_time, ch->rtp_ch->rtp_first_SN, ch->rtp_ch->rtp_time
			                                ));

			/*skip RTCP clock init when RTSP is used*/
			if (ch->rtsp) ch->rtcp_init = 1;

//			if (ch->depacketizer->payt==GF_RTP_PAYT_H264_AVC) ch->depacketizer->flags |= GF_RTP_AVC_WAIT_RAP;
		}
		/*this is a RESUME on the channel, filter packets based on time (darwin seems to send
		a couple of packets before)
		do not fetch if we're below 10 ms or <0, because this means we already have
		this packet - as the PAUSE is issued with the RTP currentTime*/
		else if (ch_time <= 0.021) {
			return;
		}
		ch->check_rtp_time = RTP_SET_TIME_NONE;
	}

	gf_rtp_depacketizer_process(ch->depacketizer, &hdr, pck + PayloadStart, size - PayloadStart);

	/*last check: signal EOS if we're close to the end range, in case the server does not send RTCP BYE*/
	if ((ch->flags & RTP_HAS_RANGE) && !(ch->flags & RTP_EOS) ) {
		/*also check last CTS*/
		Double ts = (Double) ((u32) ch->depacketizer->sl_hdr.compositionTimeStamp - hdr.TimeStamp);
		ts /= gf_rtp_get_clockrate(ch->rtp_ch);
		if (ABSDIFF(ch->range_end, (ts + ch->current_start + gf_rtp_get_current_time(ch->rtp_ch)) ) < 0.2) {
			ch->flags |= RTP_EOS;
			ch->stat_stop_time = gf_sys_clock();
			gf_service_send_packet(ch->owner->service, ch->channel, NULL, 0, NULL, GF_EOS);
		}
	}
}
Example #7
File: saf_in.c Project: Bevara/GPAC
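Download callback for SAF streams: accumulates received bytes, parses complete SAF access units, declares new channels and object descriptors for stream declaration units, forwards media access units with a rebuilt SL header, and sends GF_EOS on end-of-stream and end-of-session units.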
static void SAF_NetIO(void *cbk, GF_NETIO_Parameter *param)
{
	GF_Err e;
	Bool is_rap, go;
	SAFChannel *ch;
	u32 cts, au_sn, au_size, type, i, stream_id;
	u64 bs_pos;
	GF_BitStream *bs;
	GF_SLHeader sl_hdr;

	SAFIn *read = (SAFIn *) cbk;

	e = param->error;
	/*done*/
	if (param->msg_type==GF_NETIO_DATA_TRANSFERED) {
		if (read->stream && (read->saf_type==SAF_FILE_REMOTE)) read->saf_type = SAF_FILE_LOCAL;
		return;
	} else {
		/*handle service message*/
		gf_service_download_update_stats(read->dnload);
		if (param->msg_type!=GF_NETIO_DATA_EXCHANGE) {
			if (e<0) {
				if (read->needs_connection) {
					read->needs_connection = 0;
					gf_service_connect_ack(read->service, NULL, e);
				}
				return;
			}
			if (read->needs_connection) {
				u32 total_size;
				gf_dm_sess_get_stats(read->dnload, NULL, NULL, &total_size, NULL, NULL, NULL);
				if (!total_size) read->saf_type = SAF_LIVE_STREAM;
			}
			return;
		}
	}
	if (!param->size) return;

	if (!read->run_state) return;

	if (read->alloc_size < read->saf_size + param->size) {
		read->saf_data = (char*)gf_realloc(read->saf_data, sizeof(char)*(read->saf_size + param->size) );
		read->alloc_size = read->saf_size + param->size;
	}
	memcpy(read->saf_data + read->saf_size, param->data, sizeof(char)*param->size);
	read->saf_size += param->size;

	/*first AU not complete yet*/
	if (read->saf_size<10) return;

	bs = gf_bs_new(read->saf_data, read->saf_size, GF_BITSTREAM_READ);
	bs_pos = 0;

	go = 1;
	while (go) {
		u64 avail = gf_bs_available(bs);
		bs_pos = gf_bs_get_position(bs);

		if (avail<10) break;

		is_rap = gf_bs_read_int(bs, 1);
		au_sn = gf_bs_read_int(bs, 15);
		gf_bs_read_int(bs, 2);
		cts = gf_bs_read_int(bs, 30);
		au_size = gf_bs_read_int(bs, 16);
		avail-=8;

		if (au_size > avail) break;
		assert(au_size>=2);

		is_rap = 1;

		type = gf_bs_read_int(bs, 4);
		stream_id = gf_bs_read_int(bs, 12);
		au_size -= 2;

		ch = saf_get_channel(read, stream_id, NULL);
		switch (type) {
		case 1:
		case 2:
		case 7:
			if (ch) {
				gf_bs_skip_bytes(bs, au_size);
			} else {
				SAFChannel *first = (SAFChannel *)gf_list_get(read->channels, 0);
				GF_SAFEALLOC(ch, SAFChannel);
				ch->stream_id = stream_id;
				ch->esd = gf_odf_desc_esd_new(0);
				ch->esd->ESID = stream_id;
				ch->esd->OCRESID = first ? first->stream_id : stream_id;
				ch->esd->slConfig->useRandomAccessPointFlag = 1;
				ch->esd->slConfig->AUSeqNumLength = 0;
				ch->esd->decoderConfig->objectTypeIndication = gf_bs_read_u8(bs);
				ch->esd->decoderConfig->streamType = gf_bs_read_u8(bs);
				ch->ts_res = ch->esd->slConfig->timestampResolution = gf_bs_read_u24(bs);
				ch->esd->decoderConfig->bufferSizeDB = gf_bs_read_u16(bs);
				au_size -= 7;
				if ((ch->esd->decoderConfig->objectTypeIndication == 0xFF) && (ch->esd->decoderConfig->streamType == 0xFF) ) {
					u16 mimeLen = gf_bs_read_u16(bs);
					gf_bs_skip_bytes(bs, mimeLen);
					au_size -= mimeLen+2;
				}
				if (type==7) {
					u16 urlLen = gf_bs_read_u16(bs);
					ch->esd->URLString = (char*)gf_malloc(sizeof(char)*(urlLen+1));
					gf_bs_read_data(bs, ch->esd->URLString, urlLen);
					ch->esd->URLString[urlLen] = 0;
					au_size -= urlLen+2;
				}
				if (au_size) {
					ch->esd->decoderConfig->decoderSpecificInfo->dataLength = au_size;
					ch->esd->decoderConfig->decoderSpecificInfo->data = (char*)gf_malloc(sizeof(char)*au_size);
					gf_bs_read_data(bs, ch->esd->decoderConfig->decoderSpecificInfo->data, au_size);
				}
				if (ch->esd->decoderConfig->streamType==4) ch->buffer_min=100;
				else if (ch->esd->decoderConfig->streamType==5) ch->buffer_min=400;
				else ch->buffer_min=0;

				if (read->needs_connection && (ch->esd->decoderConfig->streamType==GF_STREAM_SCENE)) {
					gf_list_add(read->channels, ch);
					read->needs_connection = 0;
					gf_service_connect_ack(read->service, NULL, GF_OK);
				} else if (read->needs_connection) {
					gf_odf_desc_del((GF_Descriptor *) ch->esd);
					gf_free(ch);
					ch = NULL;
				} else {
					GF_ObjectDescriptor *od;
					gf_list_add(read->channels, ch);

					od = (GF_ObjectDescriptor*)gf_odf_desc_new(GF_ODF_OD_TAG);
					gf_list_add(od->ESDescriptors, ch->esd);
					ch->esd = NULL;
					od->objectDescriptorID = ch->stream_id;
					gf_service_declare_media(read->service, (GF_Descriptor*)od, 0);

				}
			}
			break;
		case 4:
			if (ch) {
				bs_pos = gf_bs_get_position(bs);
				memset(&sl_hdr, 0, sizeof(GF_SLHeader));
				sl_hdr.accessUnitLength = au_size;
				sl_hdr.AU_sequenceNumber = au_sn;
				sl_hdr.compositionTimeStampFlag = 1;
				sl_hdr.compositionTimeStamp = cts;
				sl_hdr.randomAccessPointFlag = is_rap;
				if (read->start_range && (read->start_range*ch->ts_res>cts*1000)) {
					sl_hdr.compositionTimeStamp = read->start_range*ch->ts_res/1000;
				}
				gf_service_send_packet(read->service, ch->ch, read->saf_data+bs_pos, au_size, &sl_hdr, GF_OK);
			}
			gf_bs_skip_bytes(bs, au_size);
			break;
		case 3:
			if (ch) gf_service_send_packet(read->service, ch->ch, NULL, 0, NULL, GF_EOS);
			break;
		case 5:
			go = 0;
			read->run_state = 0;
			i=0;
			while ((ch = (SAFChannel *)gf_list_enum(read->channels, &i))) {
				gf_service_send_packet(read->service, ch->ch, NULL, 0, NULL, GF_EOS);
			}
			break;
		}
	}

	gf_bs_del(bs);
	if (bs_pos) {
		u32 remain = (u32) (read->saf_size - bs_pos);
		if (remain) memmove(read->saf_data, read->saf_data+bs_pos, sizeof(char)*remain);
		read->saf_size = remain;
	}
	SAF_Regulate(read);
}