static void MP2TS_SendPacket(M2TSIn *m2ts, GF_M2TS_PES_PCK *pck)
{
    GF_SLHeader slh;

    /*PCR not initialized, don't send any data*/
    if (!pck->stream->program->first_dts) return;
    if (!pck->stream->user) return;

    memset(&slh, 0, sizeof(GF_SLHeader));
    slh.accessUnitStartFlag = (pck->flags & GF_M2TS_PES_PCK_AU_START) ? 1 : 0;
    if (slh.accessUnitStartFlag) {
#if 0
        slh.OCRflag = 1;
        slh.m2ts_pcr = 1;
        slh.objectClockReference = pck->stream->program->last_pcr_value;
#else
        slh.OCRflag = 0;
#endif
        slh.compositionTimeStampFlag = 1;
        slh.compositionTimeStamp = pck->PTS;
        if (pck->DTS) {
            slh.decodingTimeStampFlag = 1;
            slh.decodingTimeStamp = pck->DTS;
        }
        slh.randomAccessPointFlag = (pck->flags & GF_M2TS_PES_PCK_RAP) ? 1 : 0;
    }
    gf_term_on_sl_packet(m2ts->service, pck->stream->user, pck->data, pck->data_len, &slh, GF_OK);
}
void OGG_EndOfFile(OGGReader *read)
{
    OGGStream *st;
    u32 i = 0;
    while ((st = (OGGStream *)gf_list_enum(read->streams, &i))) {
        gf_term_on_sl_packet(read->service, st->ch, NULL, 0, NULL, GF_EOS);
    }
}
static void AAC_OnLiveData(AACReader *read, char *data, u32 data_size)
{
    u32 pos;
    Bool sync;
    GF_BitStream *bs;
    ADTSHeader hdr;

    /*append the new chunk to the pending buffer*/
    read->data = realloc(read->data, sizeof(char)*(read->data_size + data_size));
    memcpy(read->data + read->data_size, data, sizeof(char)*data_size);
    read->data_size += data_size;

    if (read->needs_connection) {
        read->needs_connection = 0;
        bs = gf_bs_new(read->data, read->data_size, GF_BITSTREAM_READ);
        sync = ADTS_SyncFrame(bs, 0, &hdr);
        gf_bs_del(bs);
        if (!sync) return;

        read->nb_ch = hdr.nb_ch;
        read->prof = hdr.profile;
        read->sr_idx = hdr.sr_idx;
        read->oti = hdr.is_mp2 ? read->prof + 0x66 - 1 : 0x40;
        read->sample_rate = GF_M4ASampleRates[read->sr_idx];
        read->is_live = 1;
        memset(&read->sl_hdr, 0, sizeof(GF_SLHeader));
        gf_term_on_connect(read->service, NULL, GF_OK);
        AAC_SetupObject(read);
    }
    if (!read->ch) return;

    /*need a full ADTS header (7 bytes)*/
    if (read->data_size <= 7) return;

    bs = gf_bs_new(read->data, read->data_size, GF_BITSTREAM_READ);
    hdr.frame_size = pos = 0;
    while (ADTS_SyncFrame(bs, 0, &hdr)) {
        pos = (u32) gf_bs_get_position(bs);
        read->sl_hdr.accessUnitStartFlag = 1;
        read->sl_hdr.accessUnitEndFlag = 1;
        read->sl_hdr.AU_sequenceNumber++;
        read->sl_hdr.compositionTimeStampFlag = 1;
        /*one AAC frame = 1024 samples*/
        read->sl_hdr.compositionTimeStamp += 1024;
        gf_term_on_sl_packet(read->service, read->ch, read->data + pos, hdr.frame_size, &read->sl_hdr, GF_OK);
        gf_bs_skip_bytes(bs, hdr.frame_size);
    }
    pos = (u32) gf_bs_get_position(bs);
    gf_bs_del(bs);

    /*keep the unparsed remainder for the next call*/
    if (pos) {
        char *d;
        read->data_size -= pos;
        d = malloc(sizeof(char) * read->data_size);
        memcpy(d, read->data + pos, sizeof(char) * read->data_size);
        free(read->data);
        read->data = d;
    }
    AAC_RegulateDataRate(read);
}
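/* The live readers in this file (AAC above, AC3 and MP3 below) all clock their
 * access units the same way: the CTS is advanced by the frame duration in
 * samples (1024 for AAC, 1536 for AC3, gf_mp3_window_size() for MP3), which
 * yields real-time spacing when the channel's timestamp resolution is the
 * sample rate. A minimal sketch of that arithmetic, assuming a non-zero sample
 * rate (aac_frame_duration_ms is illustrative, not a GPAC API):
 */
static u32 aac_frame_duration_ms(u32 sample_rate)
{
    /* wall-clock duration of one 1024-sample AAC frame, in milliseconds */
    return sample_rate ? (1024 * 1000) / sample_rate : 0;
}
/* e.g. 48000 Hz -> 21 ms per AU, 44100 Hz -> 23 ms per AU */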
static void AC3_OnLiveData(AC3Reader *read, const char *data, u32 data_size)
{
    u64 pos;
    Bool sync;
    GF_BitStream *bs;
    GF_AC3Header hdr;

    read->data = gf_realloc(read->data, sizeof(char)*(read->data_size + data_size));
    memcpy(read->data + read->data_size, data, sizeof(char)*data_size);
    read->data_size += data_size;

    if (read->needs_connection) {
        read->needs_connection = 0;
        bs = gf_bs_new(read->data, read->data_size, GF_BITSTREAM_READ);
        sync = gf_ac3_parser_bs(bs, &hdr, 1);
        gf_bs_del(bs);
        if (!sync) return;

        read->nb_ch = hdr.channels;
        read->sample_rate = hdr.sample_rate;
        read->is_live = 1;
        memset(&read->sl_hdr, 0, sizeof(GF_SLHeader));
        gf_term_on_connect(read->service, NULL, GF_OK);
        AC3_SetupObject(read);
    }
    if (!read->ch) return;

    /*need a full AC3 header*/
    if (read->data_size <= 7) return;

    bs = gf_bs_new(read->data, read->data_size, GF_BITSTREAM_READ);
    hdr.framesize = 0;
    pos = 0;
    while (gf_ac3_parser_bs(bs, &hdr, 0)) {
        pos = gf_bs_get_position(bs);
        read->sl_hdr.accessUnitStartFlag = 1;
        read->sl_hdr.accessUnitEndFlag = 1;
        read->sl_hdr.AU_sequenceNumber++;
        read->sl_hdr.compositionTimeStampFlag = 1;
        /*one AC3 frame = 1536 samples*/
        read->sl_hdr.compositionTimeStamp += 1536;
        gf_term_on_sl_packet(read->service, read->ch, read->data + pos, hdr.framesize, &read->sl_hdr, GF_OK);
        gf_bs_skip_bytes(bs, hdr.framesize);
    }
    pos = gf_bs_get_position(bs);
    gf_bs_del(bs);

    /*keep the unparsed remainder for the next call*/
    if (pos) {
        char *d;
        read->data_size -= (u32) pos;
        d = gf_malloc(sizeof(char) * read->data_size);
        memcpy(d, read->data + pos, sizeof(char) * read->data_size);
        gf_free(read->data);
        read->data = d;
    }
    AC3_RegulateDataRate(read);
}
static void MP3_OnLiveData(MP3Reader *read, char *data, u32 data_size)
{
    u32 hdr, size, pos;

    if (read->needs_connection) {
        hdr = gf_mp3_get_next_header_mem(data, data_size, &pos);
        if (!hdr) return;

        read->sample_rate = gf_mp3_sampling_rate(hdr);
        read->oti = gf_mp3_object_type_indication(hdr);
        read->is_live = 1;
        memset(&read->sl_hdr, 0, sizeof(GF_SLHeader));
        read->needs_connection = 0;
        gf_term_on_connect(read->service, NULL, GF_OK);
        mp3_setup_object(read);
    }
    if (!data_size) return;

    read->data = gf_realloc(read->data, sizeof(char)*(read->data_size + data_size));
    memcpy(read->data + read->data_size, data, sizeof(char)*data_size);
    read->data_size += data_size;
    if (!read->ch) return;

    data = read->data;
    data_size = read->data_size;

    while (1) {
        size = 0;
        hdr = gf_mp3_get_next_header_mem(data, data_size, &pos);
        if (hdr) size = gf_mp3_frame_size(hdr);

        /*not enough data, copy over and wait for the next chunk*/
        if (!hdr || (pos + size > data_size)) {
            char *d = gf_malloc(sizeof(char) * data_size);
            memcpy(d, data, sizeof(char) * data_size);
            gf_free(read->data);
            read->data = d;
            read->data_size = data_size;
            MP3_RegulateDataRate(read);
            return;
        }
        read->sl_hdr.accessUnitStartFlag = 1;
        read->sl_hdr.accessUnitEndFlag = 1;
        read->sl_hdr.AU_sequenceNumber++;
        read->sl_hdr.compositionTimeStampFlag = 1;
        /*advance CTS by the frame duration in samples*/
        read->sl_hdr.compositionTimeStamp += gf_mp3_window_size(hdr);
        gf_term_on_sl_packet(read->service, read->ch, data + pos, size, &read->sl_hdr, GF_OK);
        data += pos + size;
        assert(data_size >= pos + size);
        data_size -= pos + size;
    }
}
void Freenect_DepthCallback_GREY16(freenect_device *dev, void *v_depth, uint32_t timestamp)
{
    FreenectIn *vcap = freenect_get_user(dev);
    if (vcap->depth_channel) {
        memcpy(vcap->depth_buf, v_depth, vcap->out_depth_size);
        vcap->depth_sl_header.compositionTimeStamp = timestamp;
        gf_term_on_sl_packet(vcap->service, vcap->depth_channel, (char *) vcap->depth_buf, vcap->out_depth_size, &vcap->depth_sl_header, GF_OK);
    }
}
static void rtp_sl_packet_cbk(void *udta, char *payload, u32 size, GF_SLHeader *hdr, GF_Err e)
{
    u64 cts, dts;
    RTPStream *ch = (RTPStream *)udta;

    if (!ch->rtcp_init) return;

    /*rebase timestamps on the RTCP-derived offset, restoring them on exit*/
    cts = hdr->compositionTimeStamp;
    dts = hdr->decodingTimeStamp;
    hdr->compositionTimeStamp -= ch->ts_offset;
    hdr->decodingTimeStamp -= ch->ts_offset;

    if (ch->rtp_ch->packet_loss) e = GF_REMOTE_SERVICE_ERROR;

    /*simulated loss: once first_packet_drop is reached, drop every
      frequency_drop-th packet and forward the others*/
    if (ch->owner->first_packet_drop && (hdr->packetSequenceNumber >= ch->owner->first_packet_drop)) {
        if ((hdr->packetSequenceNumber - ch->owner->first_packet_drop) % ch->owner->frequency_drop)
            gf_term_on_sl_packet(ch->owner->service, ch->channel, payload, size, hdr, e);
    } else {
        gf_term_on_sl_packet(ch->owner->service, ch->channel, payload, size, hdr, e);
    }
    hdr->compositionTimeStamp = cts;
    hdr->decodingTimeStamp = dts;
}
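/* A worked example of the loss-simulation predicate above: with
 * first_packet_drop = 100 and frequency_drop = 10, packets 100, 110, 120, ...
 * are dropped (their distance from 100 is a multiple of 10) and everything
 * else is forwarded. The same test in isolation (keep_packet is illustrative,
 * not a GPAC API; frequency_drop must be non-zero or the modulo faults):
 */
static Bool keep_packet(u32 seq_num, u32 first_drop, u32 freq_drop)
{
    if (!first_drop || (seq_num < first_drop)) return 1; /* dropping disabled or not started */
    return ((seq_num - first_drop) % freq_drop) ? 1 : 0;
}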
static GFINLINE void MP2TS_SendSLPacket(M2TSIn *m2ts, GF_M2TS_SL_PCK *pck)
{
    GF_SLHeader SLHeader, *slh = NULL;
    u32 SLHdrLen = 0;

    /*build an SL header if the stream carries an SL config*/
    if (((GF_M2TS_ES*)pck->stream)->slcfg) {
        gf_sl_depacketize(((GF_M2TS_ES*)pck->stream)->slcfg, &SLHeader, pck->data, pck->data_len, &SLHdrLen);
        SLHeader.m2ts_version_number_plus_one = pck->version_number + 1;
        slh = &SLHeader;
    }
    gf_term_on_sl_packet(m2ts->service, pck->stream->user, pck->data + SLHdrLen, pck->data_len - SLHdrLen, slh, GF_OK);
}
void Freenect_DepthCallback_ColorGradient(freenect_device *dev, void *v_depth, uint32_t timestamp)
{
    FreenectIn *vcap = freenect_get_user(dev);
    if (vcap->depth_channel) {
        u32 i;
        u16 *depth = (u16*)v_depth;
        /*remap to color RGB using freenect gamma*/
        for (i=0; i<vcap->width*vcap->height; i++) {
            int pval = vcap->gamma[depth[i]];
            int lb = pval & 0xff;
            switch (pval>>8) {
            case 0:
                vcap->depth_buf[3*i+0] = 255;
                vcap->depth_buf[3*i+1] = 255-lb;
                vcap->depth_buf[3*i+2] = 255-lb;
                break;
            case 1:
                vcap->depth_buf[3*i+0] = 255;
                vcap->depth_buf[3*i+1] = lb;
                vcap->depth_buf[3*i+2] = 0;
                break;
            case 2:
                vcap->depth_buf[3*i+0] = 255-lb;
                vcap->depth_buf[3*i+1] = 255;
                vcap->depth_buf[3*i+2] = 0;
                break;
            case 3:
                vcap->depth_buf[3*i+0] = 0;
                vcap->depth_buf[3*i+1] = 255;
                vcap->depth_buf[3*i+2] = lb;
                break;
            case 4:
                vcap->depth_buf[3*i+0] = 0;
                vcap->depth_buf[3*i+1] = 255-lb;
                vcap->depth_buf[3*i+2] = 255;
                break;
            case 5:
                vcap->depth_buf[3*i+0] = 0;
                vcap->depth_buf[3*i+1] = 0;
                vcap->depth_buf[3*i+2] = 255-lb;
                break;
            default:
                vcap->depth_buf[3*i+0] = 0;
                vcap->depth_buf[3*i+1] = 0;
                vcap->depth_buf[3*i+2] = 0;
                break;
            }
        }
        vcap->depth_sl_header.compositionTimeStamp = timestamp;
        gf_term_on_sl_packet(vcap->service, vcap->depth_channel, (char *) vcap->depth_buf, vcap->out_depth_size, &vcap->depth_sl_header, GF_OK);
    }
}
void Freenect_DepthCallback_GREY8(freenect_device *dev, void *v_depth, uint32_t timestamp)
{
    FreenectIn *vcap = freenect_get_user(dev);
    if (vcap->depth_channel) {
        u32 i, j;
        u16 *depth = (u16*)v_depth;
        for (i=0; i<vcap->height; i++) {
            for (j=0; j<vcap->width; j++) {
                /*rescale 11-bit depth (0..2047) to 8 bits*/
                int pval = depth[j + i*vcap->width];
                pval = (255*pval) / 2048;
                vcap->depth_buf[j + i*vcap->width] = pval;
            }
        }
        // vcap->depth_sl_header.compositionTimeStamp = timestamp;
        vcap->depth_sl_header.compositionTimeStamp++;
        gf_term_on_sl_packet(vcap->service, vcap->depth_channel, (char *) vcap->depth_buf, vcap->out_depth_size, &vcap->depth_sl_header, GF_OK);
    }
}
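/* The GREY8 path above recomputes (255*pval)/2048 for every pixel, but the
 * depth samples only span 11 bits (0..2047), so the mapping fits in a
 * 2048-entry table built once. A sketch of that alternative, under the
 * assumption that depth values stay below 2048 (depth_lut and build_depth_lut
 * are illustrative, not part of FreenectIn):
 */
static u8 depth_lut[2048];

static void build_depth_lut(void)
{
    u32 v;
    for (v=0; v<2048; v++) depth_lut[v] = (u8) ((255*v) / 2048);
}
/* the inner loop then reduces to: vcap->depth_buf[i] = depth_lut[depth[i] & 2047]; */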
void Freenect_DepthCallback_RGBD(freenect_device *dev, void *v_depth, uint32_t timestamp)
{
    FreenectIn *vcap = freenect_get_user(dev);
    if (vcap->depth_channel) {
        u32 i, j;
        u16 *depth = (u16*)v_depth;
        for (i=0; i<vcap->height; i++) {
            for (j=0; j<vcap->width; j++) {
                int idx_col = 3 * (j + i*vcap->width);
                int idx_depth = 4 * (j + i*vcap->width);
                int pval = depth[i*vcap->width + j];
                /*inverted 8-bit depth goes in the fourth component*/
                pval = 255 - (255*pval) / 2048;
                vcap->depth_buf[idx_depth    ] = vcap->vid_buf[idx_col];
                vcap->depth_buf[idx_depth + 1] = vcap->vid_buf[idx_col+1];
                vcap->depth_buf[idx_depth + 2] = vcap->vid_buf[idx_col+2];
                vcap->depth_buf[idx_depth + 3] = pval;
            }
        }
        vcap->depth_sl_header.compositionTimeStamp = timestamp;
        gf_term_on_sl_packet(vcap->service, vcap->depth_channel, (char *) vcap->depth_buf, vcap->out_depth_size, &vcap->depth_sl_header, GF_OK);
    }
}
static u32 FFDemux_Run(void *par)
{
    AVPacket pkt;
    s64 seek_to;
    GF_NetworkCommand com;
    GF_NetworkCommand map;
    GF_SLHeader slh;
    FFDemux *ffd = (FFDemux *) par;

    memset(&map, 0, sizeof(GF_NetworkCommand));
    map.command_type = GF_NET_CHAN_MAP_TIME;
    memset(&com, 0, sizeof(GF_NetworkCommand));
    com.command_type = GF_NET_BUFFER_QUERY;
    memset(&slh, 0, sizeof(GF_SLHeader));
    slh.compositionTimeStampFlag = slh.decodingTimeStampFlag = 1;

    while (ffd->is_running) {
        /*wait for both channels to be connected before demuxing*/
        if ((!ffd->video_ch && (ffd->video_st>=0)) || (!ffd->audio_ch && (ffd->audio_st>=0))) {
            gf_sleep(100);
            continue;
        }
        if ((ffd->seek_time>=0) && ffd->seekable) {
            seek_to = (s64) (AV_TIME_BASE*ffd->seek_time);
            av_seek_frame(ffd->ctx, -1, seek_to, AVSEEK_FLAG_BACKWARD);
            ffd->seek_time = -1;
        }
        pkt.stream_index = -1;
        /*EOF*/
        if (av_read_frame(ffd->ctx, &pkt) < 0) break;
        if (pkt.pts == AV_NOPTS_VALUE) pkt.pts = pkt.dts;
        if (!pkt.dts) pkt.dts = pkt.pts;
        slh.compositionTimeStamp = pkt.pts;
        slh.decodingTimeStamp = pkt.dts;

        gf_mx_p(ffd->mx);
        /*blindly send audio as soon as video is init*/
        if (ffd->audio_ch && (pkt.stream_index == ffd->audio_st)) {
            // u64 seek_audio = ffd->seek_time ? (u64) (s64) (ffd->seek_time*ffd->audio_tscale.den) : 0;
            slh.compositionTimeStamp *= ffd->audio_tscale.num;
            slh.decodingTimeStamp *= ffd->audio_tscale.num;
#if 0
            if (slh.compositionTimeStamp < seek_audio) {
                slh.decodingTimeStamp = slh.compositionTimeStamp = seek_audio;
            }
#endif
            gf_term_on_sl_packet(ffd->service, ffd->audio_ch, (char *) pkt.data, pkt.size, &slh, GF_OK);
        }
        else if (ffd->video_ch && (pkt.stream_index == ffd->video_st)) {
            // u64 seek_video = ffd->seek_time ? (u64) (s64) (ffd->seek_time*ffd->video_tscale.den) : 0;
            slh.compositionTimeStamp *= ffd->video_tscale.num;
            slh.decodingTimeStamp *= ffd->video_tscale.num;
#if 0
            if (slh.compositionTimeStamp < seek_video) {
                slh.decodingTimeStamp = slh.compositionTimeStamp = seek_video;
            }
#endif
            gf_term_on_sl_packet(ffd->service, ffd->video_ch, (char *) pkt.data, pkt.size, &slh, GF_OK);
        }
        gf_mx_v(ffd->mx);
        av_free_packet(&pkt);

        /*sleep until the buffer occupancy is too low - note that this works because
          all streams in this demuxer are synchronized*/
        while (ffd->audio_run || ffd->video_run) {
            gf_term_on_command(ffd->service, &com, GF_OK);
            if (com.buffer.occupancy < com.buffer.max) break;
            gf_sleep(10);
        }
        if (!ffd->audio_run && !ffd->video_run) break;
    }
    /*signal EOS*/
    if (ffd->audio_ch) gf_term_on_sl_packet(ffd->service, ffd->audio_ch, NULL, 0, NULL, GF_EOS);
    if (ffd->video_ch) gf_term_on_sl_packet(ffd->service, ffd->video_ch, NULL, 0, NULL, GF_EOS);
    ffd->is_running = 2;
    return 0;
}
static u32 FFDemux_Run(void *par)
{
    AVPacket pkt;
    s64 seek_to;
    u64 seek_audio, seek_video;
    Bool video_init, do_seek, map_audio_time, map_video_time;
    GF_NetworkCommand com;
    GF_NetworkCommand map;
    GF_SLHeader slh;
    FFDemux *ffd = (FFDemux *) par;

    memset(&map, 0, sizeof(GF_NetworkCommand));
    map.command_type = GF_NET_CHAN_MAP_TIME;
    memset(&com, 0, sizeof(GF_NetworkCommand));
    com.command_type = GF_NET_CHAN_BUFFER_QUERY;
    memset(&slh, 0, sizeof(GF_SLHeader));
    slh.compositionTimeStampFlag = slh.decodingTimeStampFlag = 1;

    seek_to = (s64) (AV_TIME_BASE*ffd->seek_time);
    map_video_time = !ffd->seekable;
    video_init = (seek_to && ffd->video_ch) ? 0 : 1;
    seek_audio = seek_video = 0;
    if (ffd->seekable && (ffd->audio_st>=0)) seek_audio = (u64) (s64) (ffd->seek_time*ffd->audio_tscale.den);
    if (ffd->seekable && (ffd->video_st>=0)) seek_video = (u64) (s64) (ffd->seek_time*ffd->video_tscale.den);

    /*it appears that ffmpeg has trouble resyncing on some mpeg files - we trick it
      by restarting at 0 to get the first video frame, and only then seek*/
    if (ffd->seekable) av_seek_frame(ffd->ctx, -1, video_init ? seek_to : 0, AVSEEK_FLAG_BACKWARD);
    do_seek = !video_init;
    map_audio_time = video_init ? ffd->unreliable_audio_timing : 0;

    while (ffd->is_running) {
        pkt.stream_index = -1;
        /*EOF*/
        if (av_read_frame(ffd->ctx, &pkt) < 0) break;
        if (pkt.pts == AV_NOPTS_VALUE) pkt.pts = pkt.dts;
        if (!pkt.dts) pkt.dts = pkt.pts;
        slh.compositionTimeStamp = pkt.pts;
        slh.decodingTimeStamp = pkt.dts;

        gf_mx_p(ffd->mx);
        /*blindly send audio as soon as video is init*/
        if (ffd->audio_ch && (pkt.stream_index == ffd->audio_st) && !do_seek) {
            slh.compositionTimeStamp *= ffd->audio_tscale.num;
            slh.decodingTimeStamp *= ffd->audio_tscale.num;
            if (map_audio_time) {
                map.base.on_channel = ffd->audio_ch;
                map.map_time.media_time = ffd->seek_time;
                /*map with TS=0 since we don't use SL*/
                map.map_time.timestamp = 0;
                map.map_time.reset_buffers = 1;
                map_audio_time = 0;
                gf_term_on_command(ffd->service, &map, GF_OK);
            }
            else if (slh.compositionTimeStamp < seek_audio) {
                slh.decodingTimeStamp = slh.compositionTimeStamp = seek_audio;
            }
            gf_term_on_sl_packet(ffd->service, ffd->audio_ch, pkt.data, pkt.size, &slh, GF_OK);
        }
        else if (ffd->video_ch && (pkt.stream_index == ffd->video_st)) {
            slh.compositionTimeStamp *= ffd->video_tscale.num;
            slh.decodingTimeStamp *= ffd->video_tscale.num;

            /*if we get pts = 0 after a seek, the demuxer is resetting PTSs, so force a map time*/
            if ((!do_seek && seek_to && !slh.compositionTimeStamp) || map_video_time) {
                seek_to = 0;
                map_video_time = 0;

                map.base.on_channel = ffd->video_ch;
                map.map_time.timestamp = (u64) pkt.pts;
                // map.map_time.media_time = ffd->seek_time;
                map.map_time.media_time = 0;
                map.map_time.reset_buffers = 0;
                gf_term_on_command(ffd->service, &map, GF_OK);
            }
            else if (slh.compositionTimeStamp < seek_video) {
                slh.decodingTimeStamp = slh.compositionTimeStamp = seek_video;
            }
            gf_term_on_sl_packet(ffd->service, ffd->video_ch, pkt.data, pkt.size, &slh, GF_OK);
            video_init = 1;
        }
        gf_mx_v(ffd->mx);
        av_free_packet(&pkt);

        /*here's the trick - only seek after sending the first packets of each stream:
          this allows ffmpeg video decoders to resync properly*/
        if (do_seek && video_init && ffd->seekable) {
            av_seek_frame(ffd->ctx, -1, seek_to, AVSEEK_FLAG_BACKWARD);
            do_seek = 0;
            map_audio_time = ffd->unreliable_audio_timing;
        }
        /*sleep until the buffer occupancy is too low - note that this works because
          all streams in this demuxer are synchronized*/
        while (1) {
            if (ffd->audio_ch) {
                com.base.on_channel = ffd->audio_ch;
                gf_term_on_command(ffd->service, &com, GF_OK);
                if (com.buffer.occupancy < ffd->data_buffer_ms) break;
            }
            if (ffd->video_ch) {
                com.base.on_channel = ffd->video_ch;
                gf_term_on_command(ffd->service, &com, GF_OK);
                if (com.buffer.occupancy < ffd->data_buffer_ms) break;
            }
            gf_sleep(10);
            /*escape if disconnected*/
            if (!ffd->audio_run && !ffd->video_run) break;
        }
        if (!ffd->audio_run && !ffd->video_run) break;
    }
    /*signal EOS*/
    if (ffd->audio_ch) gf_term_on_sl_packet(ffd->service, ffd->audio_ch, NULL, 0, NULL, GF_EOS);
    if (ffd->video_ch) gf_term_on_sl_packet(ffd->service, ffd->video_ch, NULL, 0, NULL, GF_EOS);
    ffd->is_running = 2;
    return 0;
}
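/* Both versions of FFDemux_Run rescale FFmpeg timestamps with a bare
 * multiplication: an AVPacket PTS counts in units of the stream's time_base
 * (num/den seconds), so pts*num is the same instant expressed in
 * 1/den-second ticks. A sketch of that conversion, assuming the channel was
 * declared with a timestamp resolution of time_base.den (ff_pts_to_sl_ticks
 * is illustrative, not a GPAC API):
 */
static u64 ff_pts_to_sl_ticks(s64 pts, AVRational time_base)
{
    /* pts * num/den seconds == (pts * num) ticks at den ticks per second */
    return (u64) (pts * time_base.num);
}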
void RP_ProcessRTP(RTPStream *ch, char *pck, u32 size)
{
    GF_NetworkCommand com;
    GF_Err e;
    GF_RTPHeader hdr;
    u32 PayloadStart;

    ch->rtp_bytes += size;

    /*first decode RTP*/
    e = gf_rtp_decode_rtp(ch->rtp_ch, pck, size, &hdr, &PayloadStart);
    /*corrupted or NULL data*/
    if (e || (PayloadStart >= size)) {
        //gf_term_on_sl_packet(ch->owner->service, ch->channel, NULL, 0, NULL, GF_CORRUPTED_DATA);
        return;
    }

    /*if we must notify some timing, do it now. If the channel has no range, this should NEVER be called*/
    if (ch->check_rtp_time /*&& gf_rtp_is_active(ch->rtp_ch)*/) {
        Double ch_time;

        /*it may happen that we still receive packets from a previous "play" request. If this is
          the case, filter until we reach the indicated rtptime*/
        if (ch->rtp_ch->rtp_time
            && (ch->rtp_ch->rtp_first_SN > hdr.SequenceNumber)
            && (ch->rtp_ch->rtp_time < hdr.TimeStamp)
        ) {
            GF_LOG(GF_LOG_WARNING, GF_LOG_RTP, ("[RTP] Rejecting too early packet (TS %d vs signaled rtp time %d - diff %d ms)\n",
                hdr.TimeStamp, ch->rtp_ch->rtp_time, ((hdr.TimeStamp - ch->rtp_ch->rtp_time)*1000) / ch->rtp_ch->TimeScale));
            return;
        }

        ch_time = gf_rtp_get_current_time(ch->rtp_ch);

        /*this is the first packet on the channel (no PAUSE)*/
        if (ch->check_rtp_time == RTP_SET_TIME_RTP) {
            /*Note: in a SEEK with RTSP, the rtp-info time given by the server is the rtp time of the
              desired range. But the server may (and should) send from the previous I-frame on video,
              so the time of the first rtp packet after a SEEK can actually be less than CurrentStart.
              We don't drop these packets in order to see the maximum video. We could drop them, but
              this would mean waiting for the next RAP...*/
            memset(&com, 0, sizeof(com));
            com.command_type = GF_NET_CHAN_MAP_TIME;
            com.base.on_channel = ch->channel;
            if (ch->rtsp) {
                com.map_time.media_time = ch->current_start + ch_time;
            } else {
                com.map_time.media_time = 0;
            }
            com.map_time.timestamp = hdr.TimeStamp;
            com.map_time.reset_buffers = 0;
            gf_term_on_command(ch->owner->service, &com, GF_OK);

            GF_LOG(GF_LOG_INFO, GF_LOG_RTP, ("[RTP] Mapping RTP Time seq %d TS %d Media Time %g - rtp info seq %d TS %d\n",
                hdr.SequenceNumber, hdr.TimeStamp, com.map_time.media_time, ch->rtp_ch->rtp_first_SN, ch->rtp_ch->rtp_time));

            /*skip RTCP clock init when RTSP is used*/
            if (ch->rtsp) ch->rtcp_init = 1;

            // if (ch->depacketizer->payt==GF_RTP_PAYT_H264_AVC) ch->depacketizer->flags |= GF_RTP_AVC_WAIT_RAP;
        }
        /*this is a RESUME on the channel - filter the packet based on time (darwin seems to send a
          couple of packets before). Do not fetch if we're below 10 ms or <0, because this means we
          already have this packet - the PAUSE is issued with the RTP currentTime*/
        else if (ch_time <= 0.021) {
            return;
        }
        ch->check_rtp_time = RTP_SET_TIME_NONE;
    }

    gf_rtp_depacketizer_process(ch->depacketizer, &hdr, pck + PayloadStart, size - PayloadStart);

    /*last check: signal EOS if we're close to the end range, in case the server does not send an RTCP BYE*/
    if ((ch->flags & RTP_HAS_RANGE) && !(ch->flags & RTP_EOS)) {
        /*also check last CTS*/
        Double ts = (Double) ((u32) ch->depacketizer->sl_hdr.compositionTimeStamp - hdr.TimeStamp);
        ts /= gf_rtp_get_clockrate(ch->rtp_ch);
        if (ABSDIFF(ch->range_end, (ts + ch->current_start + gf_rtp_get_current_time(ch->rtp_ch))) < 0.2) {
            ch->flags |= RTP_EOS;
            ch->stat_stop_time = gf_sys_clock();
            gf_term_on_sl_packet(ch->owner->service, ch->channel, NULL, 0, NULL, GF_EOS);
        }
    }
}
static void M2TS_OnEvent(GF_M2TS_Demuxer *ts, u32 evt_type, void *param)
{
    GF_Event evt;
    M2TSIn *m2ts = (M2TSIn *) ts->user;

    switch (evt_type) {
    case GF_M2TS_EVT_PAT_UPDATE:
        /*example code showing how to forward an event from the MPEG-2 TS input service to the GPAC user*/
#if 0
        {
            GF_Event evt;
            evt.type = GF_EVENT_FORWARDED;
            evt.forwarded_event.forward_type = GF_EVT_FORWARDED_MPEG2;
            evt.forwarded_event.service_event_type = evt_type;
            evt.forwarded_event.param = param;
            gf_term_on_service_event(m2ts->service, &evt);
        }
#endif
        break;
    case GF_M2TS_EVT_AIT_FOUND:
        evt.type = GF_EVENT_FORWARDED;
        evt.forwarded_event.forward_type = GF_EVT_FORWARDED_MPEG2;
        evt.forwarded_event.service_event_type = evt_type;
        evt.forwarded_event.param = param;
        gf_term_on_service_event(m2ts->service, &evt);
        break;
    case GF_M2TS_EVT_PAT_FOUND:
        /*in case the TS has one program, wait for the PMT to send connect, in case of IOD in PMT*/
        if (gf_list_count(m2ts->ts->programs) != 1) {
            gf_term_on_connect(m2ts->service, NULL, GF_OK);
            m2ts->is_connected = 1;
        }
        /*send the TS to the user if needed - useful to check the number of received programs*/
        evt.type = GF_EVENT_FORWARDED;
        evt.forwarded_event.forward_type = GF_EVT_FORWARDED_MPEG2;
        evt.forwarded_event.service_event_type = evt_type;
        evt.forwarded_event.param = ts;
        gf_term_on_service_event(m2ts->service, &evt);
        break;
    case GF_M2TS_EVT_PMT_FOUND:
        if (gf_list_count(m2ts->ts->programs) == 1) {
            gf_term_on_connect(m2ts->service, NULL, GF_OK);
            m2ts->is_connected = 1;
        }
        /*do not declare if a single program was requested for playback*/
        MP2TS_SetupProgram(m2ts, param, m2ts->request_all_pids, m2ts->request_all_pids ? 0 : 1);
        M2TS_FlushRequested(m2ts);
        break;
    case GF_M2TS_EVT_PMT_REPEAT:
    // case GF_M2TS_EVT_PMT_UPDATE:
        M2TS_FlushRequested(m2ts);
        break;
    case GF_M2TS_EVT_SDT_REPEAT:
    case GF_M2TS_EVT_SDT_UPDATE:
    case GF_M2TS_EVT_SDT_FOUND:
        M2TS_FlushRequested(m2ts);
        break;
    case GF_M2TS_EVT_DVB_GENERAL:
        if (m2ts->eit_channel) {
            GF_M2TS_SL_PCK *pck = (GF_M2TS_SL_PCK *)param;
            gf_term_on_sl_packet(m2ts->service, m2ts->eit_channel, pck->data, pck->data_len, NULL, GF_OK);
        }
        break;
    case GF_M2TS_EVT_PES_PCK:
        MP2TS_SendPacket(m2ts, param);
        break;
    case GF_M2TS_EVT_SL_PCK:
        MP2TS_SendSLPacket(m2ts, param);
        break;
    case GF_M2TS_EVT_AAC_CFG:
    {
        GF_M2TS_PES_PCK *pck = (GF_M2TS_PES_PCK*)param;
        if (!pck->stream->first_dts) {
            gf_m2ts_set_pes_framing(pck->stream, GF_M2TS_PES_FRAMING_SKIP_NO_RESET);
            MP2TS_DeclareStream(m2ts, pck->stream, pck->data, pck->data_len);
            if (ts->file || ts->dnload) ts->file_regulate = 1;
            pck->stream->first_dts = 1;
            /*force scene regeneration*/
            gf_term_add_media(m2ts->service, NULL, 0);
        }
    }
        break;
    case GF_M2TS_EVT_PES_PCR:
    {
        GF_M2TS_PES_PCK *pck = (GF_M2TS_PES_PCK *) param;
        /*send the PCR as an OCR-only SL packet*/
        if (pck->stream && pck->stream->user) {
            GF_SLHeader slh;
            memset(&slh, 0, sizeof(GF_SLHeader));
            slh.OCRflag = 1;
            slh.m2ts_pcr = (pck->flags & GF_M2TS_PES_PCK_DISCONTINUITY) ? 2 : 1;
            slh.objectClockReference = pck->PTS;
            gf_term_on_sl_packet(m2ts->service, pck->stream->user, NULL, 0, &slh, GF_OK);
        }
        pck->stream->program->first_dts = 1;

        if (pck->flags & GF_M2TS_PES_PCK_DISCONTINUITY) {
#if 0
            if (ts->pcr_last) {
                ts->pcr_last = pck->PTS;
                ts->stb_at_last_pcr = gf_sys_clock();
            }
#endif
            GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[M2TS In] PCR discontinuity - switching from old STB "LLD" to new one "LLD"\n", ts->pcr_last, pck->PTS));
            /*FIXME - we need to find a way to treat PCR discontinuities correctly while
              ignoring broken PCR discontinuities seen in many HLS solutions*/
            return;
        }

        if (ts->file_regulate) {
            u64 pcr = pck->PTS;
            u32 stb = gf_sys_clock();

            if (m2ts->regulation_pcr_pid == 0) {
                /*we pick the first PCR PID for file regulation - we don't need to make sure this
                  is the PCR of a program being played, as we only check buffer levels, not the
                  DTS/PTS of the streams, in the regulation step*/
                m2ts->regulation_pcr_pid = pck->stream->pid;
            } else if (m2ts->regulation_pcr_pid != pck->stream->pid) {
                return;
            }

            if (ts->pcr_last) {
                s32 diff;
                if (pcr < ts->pcr_last) {
                    GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[M2TS In] PCR "LLU" less than previous PCR "LLU"\n", pck->PTS, ts->pcr_last));
                    ts->pcr_last = pcr;
                    ts->stb_at_last_pcr = gf_sys_clock();
                    diff = 0;
                } else {
                    u64 pcr_diff = (pcr - ts->pcr_last);
                    pcr_diff /= 27000; /*27 MHz -> ms*/
                    if (pcr_diff > 1000) {
                        GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[M2TS In] PCR diff too big: "LLU" ms - PCR "LLU" - previous PCR "LLU" - error in TS ?\n", pcr_diff, pck->PTS, ts->pcr_last));
                        diff = 100;
                    } else {
                        diff = (u32) pcr_diff - (stb - ts->stb_at_last_pcr);
                    }
                }
                if (diff < 0) {
                    GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[M2TS In] Demux not going fast enough according to PCR (drift %d, pcr: "LLU", last pcr: "LLU")\n", diff, pcr, ts->pcr_last));
                } else if (diff > 0) {
                    u32 sleep_for = 1;
#ifndef GPAC_DISABLE_LOG
                    u32 nb_sleep = 0;
#endif
                    /*query buffer level, don't sleep if too low*/
                    GF_NetworkCommand com;
                    com.command_type = GF_NET_BUFFER_QUERY;
                    while (ts->run_state) {
                        gf_term_on_command(m2ts->service, &com, GF_OK);
                        if (com.buffer.occupancy < M2TS_BUFFER_MAX) {
                            GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[M2TS In] Demux not going to sleep: buffer occupancy %d ms\n", com.buffer.occupancy));
                            break;
                        }
                        /*we don't sleep for the entire buffer occupancy, because we would take
                          the risk of starving the audio chains. We try to keep buffers half full*/
#ifndef GPAC_DISABLE_LOG
                        if (!nb_sleep) {
                            GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[M2TS In] Demux going to sleep (buffer occupancy %d ms)\n", com.buffer.occupancy));
                        }
                        nb_sleep++;
#endif
                        gf_sleep(sleep_for);
                    }
#ifndef GPAC_DISABLE_LOG
                    if (nb_sleep) {
                        GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[M2TS In] Demux resume after %d ms - current buffer occupancy %d ms\n", sleep_for*nb_sleep, com.buffer.occupancy));
                    }
#endif
                    ts->nb_pck = 0;
                    ts->pcr_last = pcr;
                    ts->stb_at_last_pcr = gf_sys_clock();
                } else {
                    GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[M2TS In] Demux drift according to PCR (drift %d, pcr: "LLD", last pcr: "LLD")\n", diff, pcr, ts->pcr_last));
                }
            } else {
                ts->pcr_last = pcr;
                ts->stb_at_last_pcr = gf_sys_clock();
            }
        }
    }
        break;
    case GF_M2TS_EVT_TDT:
        if (m2ts->hybrid_on) {
            u32 i, count;
            GF_M2TS_TDT_TOT *tdt = (GF_M2TS_TDT_TOT *)param;
            GF_NetworkCommand com;
            memset(&com, 0, sizeof(com));
            com.command_type = GF_NET_CHAN_MAP_TIME;
            com.map_time.media_time = tdt->hour*3600 + tdt->minute*60 + tdt->second;
            com.map_time.reset_buffers = 0;
            count = gf_list_count(ts->programs);
            for (i=0; i<count; i++) {
                GF_M2TS_Program *prog = gf_list_get(ts->programs, i);
                u32 j, count2;
                /*map TDT once per program, only after a PCR has been received*/
                if (prog->tdt_found || !prog->last_pcr_value)
                    continue;
                prog->tdt_found = 1;
                count2 = gf_list_count(prog->streams);
                com.map_time.timestamp = prog->last_pcr_value / 300; /*27 MHz -> 90 kHz*/
                for (j=0; j<count2; j++) {
                    GF_M2TS_ES *stream = gf_list_get(prog->streams, j);
                    if (stream->user) {
                        com.base.on_channel = stream->user;
                        gf_term_on_command(m2ts->service, &com, GF_OK);
                    }
                }
                GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("[M2TS In] Mapping TDT Time %04d/%02d/%02d %02d:%02d:%02d and PCR time "LLD" on program %d\n",
                    tdt->year, tdt->month, tdt->day, tdt->hour, tdt->minute, tdt->second, com.map_time.timestamp, prog->number));
            }
        }
        break;
    case GF_M2TS_EVT_TOT:
        break;
    }
}
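/* The PCR regulation above rests on two fixed clock ratios: the PCR runs at
 * 27 MHz, so pcr_ticks/27000 is a duration in milliseconds (and
 * pcr_ticks/300, used for the TDT mapping, is a 90 kHz PTS value), while
 * gf_sys_clock() already returns milliseconds. A minimal sketch of the drift
 * computation the demuxer sleeps on (pcr_drift_ms is illustrative, not a
 * GPAC API):
 */
static s32 pcr_drift_ms(u64 pcr, u64 pcr_last, u32 stb, u32 stb_at_last_pcr)
{
    u64 pcr_elapsed_ms = (pcr - pcr_last) / 27000; /* 27 MHz ticks -> ms */
    u32 clock_elapsed_ms = stb - stb_at_last_pcr;  /* wall clock, ms */
    /* positive: the demuxer is ahead of real time and may sleep */
    return (s32) ((s64) pcr_elapsed_ms - clock_elapsed_ms);
}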
void RP_ProcessRTCP(RTPStream *ch, char *pck, u32 size)
{
    Bool has_sr;
    GF_Err e;

    if (ch->status == RTP_Connected) return;

    ch->rtcp_bytes += size;

    e = gf_rtp_decode_rtcp(ch->rtp_ch, pck, size, &has_sr);
    if (e < 0) return;

    /*update sync if on pure RTP*/
    if (!ch->rtcp_init && has_sr) {
        Double ntp_clock;

        ntp_clock = ch->rtp_ch->last_SR_NTP_sec;
        ntp_clock += ((Double)ch->rtp_ch->last_SR_NTP_frac) / 0xFFFFFFFF;

        if (!ch->owner->last_ntp) {
            //add safety in case this RTCP report is received before another report
            //that was supposed to come in earlier (with an earlier NTP)
            //Double safety_offset, time = ch->rtp_ch->last_SR_rtp_time;
            //time /= ch->rtp_ch->TimeScale;
            //safety_offset = time/2;
            ch->owner->last_ntp = ntp_clock;
        }
        if (ntp_clock >= ch->owner->last_ntp) {
            ntp_clock -= ch->owner->last_ntp;
        } else {
            ntp_clock = 0;
        }

        //assert(ch->rtp_ch->last_SR_rtp_time >= (u64) (ntp_clock * ch->rtp_ch->TimeScale));
        ch->ts_offset = ch->rtp_ch->last_SR_rtp_time;
        ch->ts_offset -= (s64) (ntp_clock * ch->rtp_ch->TimeScale);

#if 0
        GF_NetworkCommand com;
        memset(&com, 0, sizeof(com));
        com.command_type = GF_NET_CHAN_MAP_TIME;
        com.base.on_channel = ch->channel;
        com.map_time.media_time = ntp;
        if (com.map_time.media_time >= ch->owner->last_ntp) {
            com.map_time.media_time -= ch->owner->last_ntp;
        } else {
            com.map_time.media_time = 0;
        }
        com.map_time.timestamp = ch->rtp_ch->last_SR_rtp_time;
        com.map_time.reset_buffers = 1;
        gf_term_on_command(ch->owner->service, &com, GF_OK);
#endif

        GF_LOG(GF_LOG_INFO, GF_LOG_RTP, ("[RTCP] At %d Using Sender Report to map RTP TS %d to NTP clock %g - new TS offset "LLD" \n",
            gf_sys_clock(), ch->rtp_ch->last_SR_rtp_time, ntp_clock, ch->ts_offset));

        ch->rtcp_init = 1;
        ch->check_rtp_time = RTP_SET_TIME_NONE;
    }

    if (e == GF_EOS) {
        ch->flags |= RTP_EOS;
        ch->stat_stop_time = gf_sys_clock();
        gf_term_on_sl_packet(ch->owner->service, ch->channel, NULL, 0, NULL, GF_EOS);
    }
}
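/* The RTCP sync above uses the 64-bit NTP timestamp of the sender report, a
 * 32.32 fixed-point value: the integer seconds plus frac/2^32. The resulting
 * offset maps any RTP timestamp back to the origin of the first report:
 * ts_offset = last_SR_rtp_time - elapsed_ntp_seconds * TimeScale. The
 * conversion in isolation (ntp_to_seconds is illustrative, not a GPAC API;
 * the divisor mirrors the 0xFFFFFFFF used above):
 */
static Double ntp_to_seconds(u32 ntp_sec, u32 ntp_frac)
{
    return (Double) ntp_sec + ((Double) ntp_frac) / 0xFFFFFFFF;
}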
static void SAF_NetIO(void *cbk, GF_NETIO_Parameter *param)
{
    GF_Err e;
    Bool is_rap, go;
    SAFChannel *ch;
    u32 cts, au_sn, au_size, type, i, stream_id;
    u64 bs_pos;
    GF_BitStream *bs;
    GF_SLHeader sl_hdr;
    SAFIn *read = (SAFIn *) cbk;

    e = param->error;
    /*done*/
    if (param->msg_type == GF_NETIO_DATA_TRANSFERED) {
        if (read->stream && (read->saf_type == SAF_FILE_REMOTE)) read->saf_type = SAF_FILE_LOCAL;
        return;
    } else {
        /*handle service message*/
        gf_term_download_update_stats(read->dnload);
        if (param->msg_type != GF_NETIO_DATA_EXCHANGE) {
            if (e < 0) {
                if (read->needs_connection) {
                    read->needs_connection = 0;
                    gf_term_on_connect(read->service, NULL, e);
                }
                return;
            }
            if (read->needs_connection) {
                u32 total_size;
                gf_dm_sess_get_stats(read->dnload, NULL, NULL, &total_size, NULL, NULL, NULL);
                if (!total_size) read->saf_type = SAF_LIVE_STREAM;
            }
            return;
        }
    }
    if (!param->size) return;
    if (!read->run_state) return;

    /*append the new chunk to the SAF buffer*/
    if (read->alloc_size < read->saf_size + param->size) {
        read->saf_data = (char*)gf_realloc(read->saf_data, sizeof(char)*(read->saf_size + param->size));
        read->alloc_size = read->saf_size + param->size;
    }
    memcpy(read->saf_data + read->saf_size, param->data, sizeof(char)*param->size);
    read->saf_size += param->size;

    /*first AU not complete yet*/
    if (read->saf_size < 10) return;

    bs = gf_bs_new(read->saf_data, read->saf_size, GF_BITSTREAM_READ);
    bs_pos = 0;

    go = 1;
    while (go) {
        u64 avail = gf_bs_available(bs);
        bs_pos = gf_bs_get_position(bs);

        if (avail < 10) break;

        /*SAF AU header: 1-bit RAP, 15-bit AU seq num, 2 reserved bits, 30-bit CTS, 16-bit size*/
        is_rap = gf_bs_read_int(bs, 1);
        au_sn = gf_bs_read_int(bs, 15);
        gf_bs_read_int(bs, 2);
        cts = gf_bs_read_int(bs, 30);
        au_size = gf_bs_read_int(bs, 16);
        avail -= 8;

        if (au_size > avail) break;
        assert(au_size >= 2);

        is_rap = 1;
        type = gf_bs_read_int(bs, 4);
        stream_id = gf_bs_read_int(bs, 12);
        au_size -= 2;
        ch = saf_get_channel(read, stream_id, NULL);
        switch (type) {
        case 1:
        case 2:
        case 7:
            if (ch) {
                gf_bs_skip_bytes(bs, au_size);
            } else {
                SAFChannel *first = (SAFChannel *)gf_list_get(read->channels, 0);
                GF_SAFEALLOC(ch, SAFChannel);
                ch->stream_id = stream_id;
                ch->esd = gf_odf_desc_esd_new(0);
                ch->esd->ESID = stream_id;
                ch->esd->OCRESID = first ? first->stream_id : stream_id;
                ch->esd->slConfig->useRandomAccessPointFlag = 1;
                ch->esd->slConfig->AUSeqNumLength = 0;
                ch->esd->decoderConfig->objectTypeIndication = gf_bs_read_u8(bs);
                ch->esd->decoderConfig->streamType = gf_bs_read_u8(bs);
                ch->ts_res = ch->esd->slConfig->timestampResolution = gf_bs_read_u24(bs);
                ch->esd->decoderConfig->bufferSizeDB = gf_bs_read_u16(bs);
                au_size -= 7;
                if ((ch->esd->decoderConfig->objectTypeIndication == 0xFF) && (ch->esd->decoderConfig->streamType == 0xFF)) {
                    u16 mimeLen = gf_bs_read_u16(bs);
                    gf_bs_skip_bytes(bs, mimeLen);
                    au_size -= mimeLen + 2;
                }
                if (type == 7) {
                    u16 urlLen = gf_bs_read_u16(bs);
                    ch->esd->URLString = (char*)gf_malloc(sizeof(char)*(urlLen+1));
                    gf_bs_read_data(bs, ch->esd->URLString, urlLen);
                    ch->esd->URLString[urlLen] = 0;
                    au_size -= urlLen + 2;
                }
                if (au_size) {
                    ch->esd->decoderConfig->decoderSpecificInfo->dataLength = au_size;
                    ch->esd->decoderConfig->decoderSpecificInfo->data = (char*)gf_malloc(sizeof(char)*au_size);
                    gf_bs_read_data(bs, ch->esd->decoderConfig->decoderSpecificInfo->data, au_size);
                }
                if (ch->esd->decoderConfig->streamType == 4) ch->buffer_min = 100;
                else if (ch->esd->decoderConfig->streamType == 5) ch->buffer_min = 400;
                else ch->buffer_min = 0;

                if (read->needs_connection && (ch->esd->decoderConfig->streamType == GF_STREAM_SCENE)) {
                    gf_list_add(read->channels, ch);
                    read->needs_connection = 0;
                    gf_term_on_connect(read->service, NULL, GF_OK);
                } else if (read->needs_connection) {
                    gf_odf_desc_del((GF_Descriptor *) ch->esd);
                    gf_free(ch);
                    ch = NULL;
                } else {
                    GF_ObjectDescriptor *od;
                    gf_list_add(read->channels, ch);
                    od = (GF_ObjectDescriptor*)gf_odf_desc_new(GF_ODF_OD_TAG);
                    gf_list_add(od->ESDescriptors, ch->esd);
                    ch->esd = NULL;
                    od->objectDescriptorID = ch->stream_id;
                    gf_term_add_media(read->service, (GF_Descriptor*)od, 0);
                }
            }
            break;
        case 4:
            if (ch) {
                bs_pos = gf_bs_get_position(bs);
                memset(&sl_hdr, 0, sizeof(GF_SLHeader));
                sl_hdr.accessUnitLength = au_size;
                sl_hdr.AU_sequenceNumber = au_sn;
                sl_hdr.compositionTimeStampFlag = 1;
                sl_hdr.compositionTimeStamp = cts;
                sl_hdr.randomAccessPointFlag = is_rap;
                if (read->start_range && (read->start_range*ch->ts_res > cts*1000)) {
                    sl_hdr.compositionTimeStamp = read->start_range*ch->ts_res/1000;
                }
                gf_term_on_sl_packet(read->service, ch->ch, read->saf_data + bs_pos, au_size, &sl_hdr, GF_OK);
            }
            gf_bs_skip_bytes(bs, au_size);
            break;
        case 3:
            if (ch) gf_term_on_sl_packet(read->service, ch->ch, NULL, 0, NULL, GF_EOS);
            break;
        case 5:
            go = 0;
            read->run_state = 0;
            i = 0;
            while ((ch = (SAFChannel *)gf_list_enum(read->channels, &i))) {
                gf_term_on_sl_packet(read->service, ch->ch, NULL, 0, NULL, GF_EOS);
            }
            break;
        }
    }
    gf_bs_del(bs);

    /*keep the unparsed remainder of the SAF buffer*/
    if (bs_pos) {
        u32 remain = (u32) (read->saf_size - bs_pos);
        if (remain) memmove(read->saf_data, read->saf_data + bs_pos, sizeof(char)*remain);
        read->saf_size = remain;
    }
    SAF_Regulate(read);
}
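/* Each SAF access unit parsed above starts with a fixed 8-byte header
 * (1-bit RAP flag, 15-bit AU sequence number, 2 reserved bits, 30-bit CTS,
 * 16-bit payload size) followed by 2 payload bytes carrying a 4-bit unit
 * type and a 12-bit stream id, both counted in the payload size. A
 * standalone sketch of that parse (SAFAUHeader and saf_parse_au_header are
 * illustrative, not GPAC types):
 */
typedef struct {
    Bool is_rap;
    u32 au_sn, cts, au_size, type, stream_id;
} SAFAUHeader;

static void saf_parse_au_header(GF_BitStream *bs, SAFAUHeader *h)
{
    h->is_rap = (Bool) gf_bs_read_int(bs, 1);
    h->au_sn = gf_bs_read_int(bs, 15);
    gf_bs_read_int(bs, 2);                  /* reserved */
    h->cts = gf_bs_read_int(bs, 30);
    h->au_size = gf_bs_read_int(bs, 16);
    h->type = gf_bs_read_int(bs, 4);
    h->stream_id = gf_bs_read_int(bs, 12);
    h->au_size -= 2;                        /* type + stream id bytes are included in au_size */
}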