Example #1
static int close_file( hnd_t handle, int64_t largest_pts, int64_t second_largest_pts )
{
    mp4_hnd_t *p_mp4 = handle;

    if( !p_mp4 )
        return 0;

    if( p_mp4->p_config )
        gf_odf_avc_cfg_del( p_mp4->p_config );

    if( p_mp4->p_sample )
    {
        if( p_mp4->p_sample->data )
            free( p_mp4->p_sample->data );

        p_mp4->p_sample->dataLength = 0;
        gf_isom_sample_del( &p_mp4->p_sample );
    }

    if( p_mp4->p_file )
    {
        if( p_mp4->i_track )
        {
            /* The mdhd duration is defined as CTS[final] - CTS[0] + duration of last frame.
             * The mdhd duration (in seconds) should be able to be longer than the tkhd duration since the track is managed by edts.
             * So, if mdhd duration is equal to the last DTS or less, we give the last composition time delta to the last sample duration.
             * And then, the mdhd duration is updated, but it time-wise doesn't give the actual duration.
             * The tkhd duration is the actual track duration. */
            uint64_t mdhd_duration = (2 * largest_pts - second_largest_pts) * p_mp4->i_time_inc;
            if( mdhd_duration != gf_isom_get_media_duration( p_mp4->p_file, p_mp4->i_track ) )
            {
                uint64_t last_dts = gf_isom_get_sample_dts( p_mp4->p_file, p_mp4->i_track, p_mp4->i_numframe );
                uint32_t last_duration = (uint32_t)( mdhd_duration > last_dts ? mdhd_duration - last_dts : (largest_pts - second_largest_pts) * p_mp4->i_time_inc );
                gf_isom_set_last_sample_duration( p_mp4->p_file, p_mp4->i_track, last_duration );
            }

            /* Write an Edit Box if the first CTS offset is positive.
             * A media_time is given by not the mvhd timescale but rather the mdhd timescale.
             * The reason is that an Edit Box maps the presentation time-line to the media time-line.
             * Any demuxers should follow the Edit Box if it exists. */
            GF_ISOSample *sample = gf_isom_get_sample_info( p_mp4->p_file, p_mp4->i_track, 1, NULL, NULL );
            if( sample && sample->CTS_Offset > 0 )
            {
                uint32_t mvhd_timescale = gf_isom_get_timescale( p_mp4->p_file );
                uint64_t tkhd_duration = (uint64_t)( mdhd_duration * ( (double)mvhd_timescale / p_mp4->i_time_res ) );
                gf_isom_append_edit_segment( p_mp4->p_file, p_mp4->i_track, tkhd_duration, sample->CTS_Offset, GF_ISOM_EDIT_NORMAL );
            }
            gf_isom_sample_del( &sample );

            recompute_bitrate_mp4( p_mp4->p_file, p_mp4->i_track );
        }
        gf_isom_set_pl_indication( p_mp4->p_file, GF_ISOM_PL_VISUAL, 0x15 );
        gf_isom_set_storage_mode( p_mp4->p_file, GF_ISOM_STORE_FLAT );
        gf_isom_close( p_mp4->p_file );
    }

    free( p_mp4 );

    return 0;
}
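A quick numeric illustration of the duration arithmetic in close_file() above, with hypothetical values: since largest_pts is the CTS of the last frame (in timestamp ticks) and the PTS step stands in for the last frame's duration, (2 * largest_pts - second_largest_pts) is simply the last CTS plus one frame.

/* Hypothetical numbers, only to illustrate the mdhd computation above. */
int64_t  largest_pts        = 250;   /* CTS of the last frame, in timestamp ticks */
int64_t  second_largest_pts = 249;   /* CTS of the previous frame                 */
uint64_t i_time_inc         = 1000;  /* media timescale units per timestamp tick  */

uint64_t mdhd_duration = (2 * largest_pts - second_largest_pts) * i_time_inc;
/* = (500 - 249) * 1000 = 251000: last CTS (250000) plus one frame duration (1000) */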
Example #2
// Save the file
bool EasyMP4Writer::SaveFile()
{
	if (m_psps)
	{
		delete m_psps;
		m_psps = NULL;
	}
	if (m_ppps)
	{
		delete m_ppps;
		m_ppps = NULL;
	}
	m_spslen=0;
	m_ppslen=0;

	m_audiostartimestamp=-1;
	m_videostartimestamp=-1;
	if (p_file)
	{
		gf_isom_close(p_file);
		p_file=NULL;
	}
	if(p_config)
	{
	//	delete p_config->pictureParameterSets;
		p_config->pictureParameterSets=NULL;
	//	delete p_config->sequenceParameterSets;
		p_config->sequenceParameterSets=NULL;
		gf_odf_avc_cfg_del(p_config);
		p_config=NULL;
	}
	if(	p_audiosample)
	{
		if(	p_audiosample->data)
		{
			free(p_audiosample->data);
			p_audiosample->data=NULL;
		}
		gf_isom_sample_del(&p_audiosample);
		p_audiosample=NULL;
	}

	if(	p_videosample)
	{
		if(	p_videosample->data)
		{
			free(p_videosample->data);
			p_videosample->data=NULL;
		}
		gf_isom_sample_del(&p_videosample);
		p_videosample=NULL;
	}
	return true;
}
Example #3
static GF_Err TTIn_ServiceCommand(GF_InputService *plug, GF_NetworkCommand *com)
{
    TTIn *tti = (TTIn *)plug->priv;
    if (!tti)
        return GF_BAD_PARAM;
    if (!com->base.on_channel) return GF_NOT_SUPPORTED;
    switch (com->command_type) {
    case GF_NET_CHAN_SET_PADDING:
        gf_isom_set_sample_padding(tti->mp4, tti->tt_track, com->pad.padding_bytes);
        return GF_OK;
    case GF_NET_CHAN_DURATION:
        com->duration.duration = (Double) (s64) gf_isom_get_media_duration(tti->mp4, tti->tt_track);
        com->duration.duration /= gf_isom_get_media_timescale(tti->mp4, tti->tt_track);
        return GF_OK;
    case GF_NET_CHAN_PLAY:
        tti->start_range = (com->play.start_range>0) ? (u32) (com->play.start_range * 1000) : 0;
        if (tti->ch == com->base.on_channel) {
            tti->samp_num = 0;
            if (tti->samp) gf_isom_sample_del(&tti->samp);
        }
        return GF_OK;
    case GF_NET_CHAN_STOP:
        return GF_OK;
    default:
        return GF_OK;
    }
}
Example #4
void gf_isom_hint_sample_del(GF_HintSample *ptr)
{
	GF_HintPacket *pck;

	if (ptr->hint_subtype==GF_ISOM_BOX_TYPE_FDP_STSD) {
		gf_isom_box_del((GF_Box*)ptr);
		return;
	}

	while (gf_list_count(ptr->packetTable)) {
		pck = (GF_HintPacket *)gf_list_get(ptr->packetTable, 0);
		gf_isom_hint_pck_del(pck);
		gf_list_rem(ptr->packetTable, 0);
	}
	gf_list_del(ptr->packetTable);
	if (ptr->AdditionalData) gf_free(ptr->AdditionalData);

	if (ptr->sample_cache) {
		while (gf_list_count(ptr->sample_cache)) {
			GF_HintDataCache *hdc = (GF_HintDataCache *)gf_list_get(ptr->sample_cache, 0);
			gf_list_rem(ptr->sample_cache, 0);
			if (hdc->samp) gf_isom_sample_del(&hdc->samp);
			gf_free(hdc);
		}
		gf_list_del(ptr->sample_cache);
	}
	if (ptr->extra_data)
		gf_isom_box_del((GF_Box*)ptr->extra_data);
	if (ptr->other_boxes)
		gf_isom_box_array_del(ptr->other_boxes);

	gf_free(ptr);
}
Example #5
static GF_Err ISOW_Close(GF_StreamingCache *mc, Bool delete_cache)
{
	GF_Err e;
	ISOMReader *cache = (ISOMReader *)mc->priv;
	if (!cache->mov || !cache->service) return GF_BAD_PARAM;

	while (gf_list_count(cache->channels)) {
		ISOMChannel *ch = (ISOMChannel *)gf_list_get(cache->channels, 0);
		gf_list_rem(cache->channels, 0);
		if (ch->cache_sample) {
			gf_isom_add_sample(cache->mov, ch->track, 1, ch->cache_sample);
			gf_isom_sample_del(&ch->cache_sample);
		}
		gf_free(ch);
	}
	if (delete_cache) {
		gf_isom_delete(cache->mov);
		e = GF_OK;
	} else {
		e = gf_isom_close(cache->mov);
	}
	cache->mov = NULL;
	cache->service = NULL;
	return e;
}
Example #6
static GF_Err TTIn_CloseService(GF_InputService *plug)
{
    TTIn *tti;
    if (!plug)
        return GF_BAD_PARAM;
    tti = (TTIn *)plug->priv;
    if (!tti)
        return GF_BAD_PARAM;
    if (tti->samp)
        gf_isom_sample_del(&tti->samp);
    tti->samp = NULL;
    if (tti->mp4)
        gf_isom_delete(tti->mp4);
    tti->mp4 = NULL;
    if (tti->szFile) {
        gf_delete_file(tti->szFile);
        gf_free(tti->szFile);
        tti->szFile = NULL;
    }
    if (tti->dnload)
        gf_term_download_del(tti->dnload);
    tti->dnload = NULL;
    if (tti->service)
        gf_term_on_disconnect(tti->service, NULL, GF_OK);
    tti->service = NULL;
    return GF_OK;
}
Example #7
void dc_audio_muxer_free(AudioOutputFile *p_aoutf)
{
	if (p_aoutf->p_isof != NULL) {
		gf_isom_close(p_aoutf->p_isof);
	}

	gf_isom_sample_del(&p_aoutf->p_sample);
}
Example #8
int dc_video_muxer_free(VideoOutputFile *video_output_file)
{
	if (video_output_file->isof != NULL) {
		gf_isom_close(video_output_file->isof);
	}

	gf_isom_sample_del(&video_output_file->sample);

	return 0;
}
Example #9
GF_EXPORT
GF_Err gf_webvtt_dump_iso_track(GF_MediaExporter *dumper, char *szName, u32 track, Bool merge, Bool box_dump)
{
#ifdef GPAC_DISABLE_MEDIA_IMPORT
	return GF_NOT_SUPPORTED;
#else
	GF_Err  e;
	u32     i;
	u32     count;
	u32     timescale;
	FILE    *out;
	u32     di;
	u64     duration;
	GF_WebVTTParser *parser;

	out = szName ? gf_fopen(szName, "wt") : (dumper->dump_file ? dumper->dump_file : stdout);
	if (!out) return GF_IO_ERR;// gf_export_message(dumper, GF_IO_ERR, "Error opening %s for writing - check disk access & permissions", szName);

	parser = gf_webvtt_parser_new();
	parser->user = out;
	parser->on_cue_read = gf_webvtt_dump_cue;

	if (box_dump)
		fprintf(out, "<WebVTTTrack trackID=\"%d\">\n", gf_isom_get_track_id(dumper->file, track) );

	e = gf_webvtt_dump_header(out, dumper->file, track, box_dump, 1);
	if (e) goto exit;

	timescale = gf_isom_get_media_timescale(dumper->file, track);

	count = gf_isom_get_sample_count(dumper->file, track);
	for (i=0; i<count; i++) {
		GF_ISOSample *samp = gf_isom_get_sample(dumper->file, track, i+1, &di);
		if (!samp) {
			e = gf_isom_last_error(dumper->file);
			goto exit;
		}
		e = gf_webvtt_parse_iso_sample(parser, timescale, samp, merge, box_dump);
		if (e) {
			goto exit;
		}
		gf_isom_sample_del(&samp);
	}
	duration = gf_isom_get_media_duration(dumper->file, track);
	gf_webvtt_parser_dump_finalize(parser, duration);

	if (box_dump)
		fprintf(out, "</WebVTTTrack>\n");

exit:
	gf_webvtt_parser_del(parser);
	if (szName) gf_fclose(out);
	return e;
#endif
}
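A hedged caller sketch for the exporter above (hypothetical setup; only the fields the function actually reads are filled in):

GF_MediaExporter dumper;
GF_Err e;
memset(&dumper, 0, sizeof(GF_MediaExporter));
dumper.file = movie;   /* a GF_ISOFile already opened for reading elsewhere */
/* dump track 1 as a WebVTT file, without cue merging or ISOBMFF box dumping */
e = gf_webvtt_dump_iso_track(&dumper, "subtitles.vtt", 1, GF_FALSE, GF_FALSE);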
Example #10
static void recompute_bitrate_mp4( GF_ISOFile *p_file, int i_track )
{
    u32 count, di, timescale, time_wnd, rate;
    u64 offset;
    Double br;
    GF_ESD *esd;

    esd = gf_isom_get_esd( p_file, i_track, 1 );
    if( !esd )
        return;

    esd->decoderConfig->avgBitrate = 0;
    esd->decoderConfig->maxBitrate = 0;
    rate = time_wnd = 0;

    timescale = gf_isom_get_media_timescale( p_file, i_track );
    count = gf_isom_get_sample_count( p_file, i_track );
    for( u32 i = 0; i < count; i++ )
    {
        GF_ISOSample *samp = gf_isom_get_sample_info( p_file, i_track, i+1, &di, &offset );
        if( !samp )
        {
            x264_cli_log( "mp4", X264_LOG_ERROR, "failure reading back frame %u\n", i );
            break;
        }

        if( esd->decoderConfig->bufferSizeDB < samp->dataLength )
            esd->decoderConfig->bufferSizeDB = samp->dataLength;

        esd->decoderConfig->avgBitrate += samp->dataLength;
        rate += samp->dataLength;
        if( samp->DTS > time_wnd + timescale )
        {
            if( rate > esd->decoderConfig->maxBitrate )
                esd->decoderConfig->maxBitrate = rate;
            time_wnd = samp->DTS;
            rate = 0;
        }

        gf_isom_sample_del( &samp );
    }

    br = (Double)(s64)gf_isom_get_media_duration( p_file, i_track );
    br /= timescale;
    esd->decoderConfig->avgBitrate = (u32)(esd->decoderConfig->avgBitrate / br);
    /*move to bps*/
    esd->decoderConfig->avgBitrate *= 8;
    esd->decoderConfig->maxBitrate *= 8;

    gf_isom_change_mpeg4_description( p_file, i_track, 1, esd );
    gf_odf_desc_del( (GF_Descriptor*)esd );
}
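The averaging at the end of recompute_bitrate_mp4() accumulates bytes over the whole track, divides by the duration in seconds, then converts to bits; a small sketch of the same arithmetic with hypothetical numbers:

/* Hypothetical: 9,000,000 bytes of samples in a 60-second track */
u32    total_bytes = 9000000;
Double duration_s  = 60.0;
u32    avg_bitrate = (u32)(total_bytes / duration_s);  /* 150000 bytes per second */
avg_bitrate *= 8;                                       /* 1200000 bits per second */
/* maxBitrate is built the same way, but from the largest byte count observed in any
   one-second (one timescale) window of the loop above */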
Example #11
static GF_Err TTIn_ChannelReleaseSLP(GF_InputService *plug, LPNETCHANNEL channel)
{
    TTIn *tti = (TTIn *)plug->priv;

    if (tti->ch == channel) {
        if (!tti->samp) return GF_BAD_PARAM;
        gf_isom_sample_del(&tti->samp);
        tti->samp = NULL;
        tti->samp_num++;
        return GF_OK;
    }
    return GF_OK;
}
Example #12
void gf_media_get_sample_average_infos(GF_ISOFile *file, u32 Track, u32 *avgSize, u32 *MaxSize, u32 *TimeDelta, u32 *maxCTSDelta, u32 *const_duration, u32 *bandwidth)
{
	u32 i, count, ts_diff;
	u64 prevTS, DTS, tdelta;
	Double bw;
	GF_ISOSample *samp;

	*avgSize = *MaxSize = 0;
	*TimeDelta = 0;
	*maxCTSDelta = 0;
	bw = 0;
	prevTS = 0;
	DTS = 0;
	tdelta = 0;

	count = gf_isom_get_sample_count(file, Track);
	*const_duration = 0;

	for (i=0; i<count; i++) {
		samp = gf_isom_get_sample_info(file, Track, i+1, NULL, NULL);
		//get the size
		*avgSize += samp->dataLength;
		if (*MaxSize < samp->dataLength) *MaxSize = samp->dataLength;
		ts_diff = (u32) (samp->DTS+samp->CTS_Offset - prevTS);
		//get the time
		tdelta += ts_diff;

		if (i==1) {
			*const_duration = ts_diff;
		} else if ( (i<count-1) && (*const_duration != ts_diff) ) {
			*const_duration = 0;
		}

		prevTS = samp->DTS+samp->CTS_Offset;
		bw += 8*samp->dataLength;
		
		//get the CTS delta
		if (samp->CTS_Offset > *maxCTSDelta) *maxCTSDelta = samp->CTS_Offset;
		gf_isom_sample_del(&samp);
	}
	if (count>1) *TimeDelta = (u32) (tdelta/ (count-1) );
	else *TimeDelta = (u32) tdelta;
	*avgSize /= count;
	bw *= gf_isom_get_media_timescale(file, Track);
	bw /= (s64) gf_isom_get_media_duration(file, Track);
	bw /= 1000;
	(*bandwidth) = (u32) (bw+0.5);

	//delta is NOT an average, we need to know exactly how many bits are
	//needed to encode CTS-DTS for ANY samples
}
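A minimal caller sketch (hypothetical file and Track variables) showing how the outputs of gf_media_get_sample_average_infos() are typically consumed, for instance when sizing RTP hint parameters:

u32 avg_size, max_size, time_delta, max_cts_delta, const_dur, bandwidth;
gf_media_get_sample_average_infos(file, Track, &avg_size, &max_size, &time_delta,
                                  &max_cts_delta, &const_dur, &bandwidth);
/* bandwidth is rounded to kbit/s, time_delta is the average presentation-time step in
   media timescale units, and max_cts_delta bounds the bits needed for any CTS-DTS gap */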
Example #13
GF_EXPORT
void gf_isom_streamer_del(GF_ISOMRTPStreamer *streamer) 
{
	GF_RTPTrack *track = streamer->stream;
	while (track) {
		GF_RTPTrack *tmp = track;
		if (track->au) gf_isom_sample_del(&track->au);
		if (track->rtp) gf_rtp_streamer_del(track->rtp);
		track = track->next;
		gf_free(tmp);
	}
	if (streamer->isom) gf_isom_close(streamer->isom);
	gf_free(streamer->dest_ip);
	gf_free(streamer);
}
Example #14
GF_Err get_laser_track_size(GF_ISOFile *mp4, u32 *size)
{
	GF_Err e = GF_OK;
	u32 j;
	u32 track_id, trackNum;
	
	*size = 0;
	track_id = gf_isom_get_track_id(mp4, 1);
	trackNum = gf_isom_get_track_by_id(mp4, track_id);
	for (j=0; j<gf_isom_get_sample_count(mp4, trackNum); j++) {
		GF_ISOSample *samp = gf_isom_get_sample_info(mp4, trackNum, j+1, NULL, NULL);
		*size += samp->dataLength;
		gf_isom_sample_del(&samp);
	}
	return e;
}
Example #15
//stores the hint sample in the file
//set IsRandomAccessPoint if you want to indicate that this is a random access point 
//in the stream
GF_Err gf_isom_end_hint_sample(GF_ISOFile *the_file, u32 trackNumber, u8 IsRandomAccessPoint)
{
	GF_TrackBox *trak;
	GF_HintSampleEntryBox *entry;
	u32 dataRefIndex;
	GF_Err e;
	GF_BitStream *bs;
	GF_ISOSample *samp;

	trak = gf_isom_get_track_from_file(the_file, trackNumber);
	if (!trak || !IsHintTrack(trak)) return GF_BAD_PARAM;

	e = Media_GetSampleDesc(trak->Media, trak->Media->information->sampleTable->currentEntryIndex, (GF_SampleEntryBox **) &entry, &dataRefIndex);
	if (e) return e;
	if (!entry->hint_sample) return GF_BAD_PARAM;

	//first of all, we need to adjust the offset for data referenced IN THIS hint sample
	//and get some PckSize
	e = AdjustHintInfo(entry, trak->Media->information->sampleTable->SampleSize->sampleCount + 1);
	if (e) return e;	
	
	//ok, let's write the sample
	bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);
	e = gf_isom_hint_sample_write(entry->hint_sample, bs);
	if (e) {
		gf_bs_del(bs);
		return e;
	}
	samp = gf_isom_sample_new();
	samp->CTS_Offset = 0;
	samp->IsRAP = IsRandomAccessPoint;
	samp->DTS = entry->hint_sample->TransmissionTime;
	//get the sample
	gf_bs_get_content(bs, &samp->data, &samp->dataLength);
	gf_bs_del(bs);

	//finally add the sample 
	e = gf_isom_add_sample(the_file, trackNumber, trak->Media->information->sampleTable->currentEntryIndex, samp);
	gf_isom_sample_del(&samp);

	//and delete the sample in our entry ...
	gf_isom_hint_sample_del(entry->hint_sample);
	entry->hint_sample = NULL;
	return e;
}
Example #16
GF_EXPORT
u32 gf_isom_find_od_for_track(GF_ISOFile *file, u32 track)
{
	u32 i, j, di, the_od_id;
	GF_TrackBox *od_tk;
	GF_TrackBox *tk = gf_isom_get_track_from_file(file, track);
	if (!tk) return 0;

	i = 0;
	while ((od_tk = (GF_TrackBox*)gf_list_enum(file->moov->trackList, &i))) {
		if (od_tk->Media->handler->handlerType != GF_ISOM_MEDIA_OD) continue;

		for (j = 0; j<od_tk->Media->information->sampleTable->SampleSize->sampleCount; j++) {
			GF_ISOSample *samp = gf_isom_get_sample(file, i, j + 1, &di);
			the_od_id = Media_FindOD_ID(od_tk->Media, samp, tk->Header->trackID);
			gf_isom_sample_del(&samp);
			if (the_od_id) return the_od_id;
		}
	}
	return 0;
}
Example #17
File: hinting.c  Project: erelh/gpac
void gf_isom_hint_sample_del(GF_HintSample *ptr)
{
	GF_HintPacket *pck;

	while (gf_list_count(ptr->packetTable)) {
		pck = (GF_HintPacket *)gf_list_get(ptr->packetTable, 0);
		gf_isom_hint_pck_del(ptr->HintType, pck);
		gf_list_rem(ptr->packetTable, 0);
	}
	gf_list_del(ptr->packetTable);
	if (ptr->AdditionalData) gf_free(ptr->AdditionalData);

	if (ptr->sample_cache) {
		while (gf_list_count(ptr->sample_cache)) {
			GF_HintDataCache *hdc = (GF_HintDataCache *)gf_list_get(ptr->sample_cache, 0);
			gf_list_rem(ptr->sample_cache, 0);
			if (hdc->samp) gf_isom_sample_del(&hdc->samp);
			gf_free(hdc);
		}
		gf_list_del(ptr->sample_cache);
	}
	gf_free(ptr);
}
Example #18
static void process_samples_from_track(GF_ISOFile *movie, u32 track_id, u32 *sample_index)
{
	u32 track_number;
	u32 sample_count;
	/* Error indicator */
	GF_Err e;
	/* Number of bytes required to finish the current ISO Box reading */
	u64 missing_bytes;

	track_number = gf_isom_get_track_by_id(movie, track_id);
	if (track_number == 0) {
		fprintf(stdout, "Could not find track ID=%u. Ignore segment.\n", track_id);
		return;
	}

	sample_count = gf_isom_get_sample_count(movie, track_number);
	while (*sample_index <= sample_count) {
		GF_ISOSample *iso_sample;
		u32 sample_description_index;

		iso_sample = gf_isom_get_sample(movie, track_number, *sample_index, &sample_description_index);
		if (iso_sample) {
			fprintf(stdout, "Found sample #%5d/%5d of length %8d, RAP: %d, DTS: "LLD", CTS: "LLD"\n", *sample_index, sample_count, iso_sample->dataLength, iso_sample->IsRAP, iso_sample->DTS, iso_sample->DTS+iso_sample->CTS_Offset);
			(*sample_index)++;

			/* Release the sample data, once you're done with it*/
			gf_isom_sample_del(&iso_sample);
		} else {
			e = gf_isom_last_error(movie);
			if (e == GF_ISOM_INCOMPLETE_FILE) {
				missing_bytes = gf_isom_get_missing_bytes(movie, track_number);
				fprintf(stdout, "Missing "LLU" bytes on input file\n", missing_bytes);
				gf_sleep(1000);
			}
		}
	}
}
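A hedged caller sketch (hypothetical loop) illustrating why sample_index is passed by pointer to process_samples_from_track(): the caller keeps it across refreshes of a growing fragmented file, so samples already printed are not processed again.

u32 sample_index = 1;   /* 1-based; persists across appended segments */
/* ... each time more data of the fragmented file has been made available to 'movie' ... */
process_samples_from_track(movie, track_id, &sample_index);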
Example #19
static GF_Err gf_isom_load_next_hint_sample(GF_ISOFile *the_file, u32 trackNumber, GF_TrackBox *trak, GF_HintSampleEntryBox *entry)
{
	GF_BitStream *bs;
	u32 descIdx;
	GF_ISOSample *samp;

	if (!entry->cur_sample) return GF_BAD_PARAM;
	if (entry->cur_sample>trak->Media->information->sampleTable->SampleSize->sampleCount) return GF_EOS;

	samp = gf_isom_get_sample(the_file, trackNumber, entry->cur_sample, &descIdx);
	if (!samp) return GF_IO_ERR;
	entry->cur_sample++;

	if (entry->hint_sample) gf_isom_hint_sample_del(entry->hint_sample);

	bs = gf_bs_new(samp->data, samp->dataLength, GF_BITSTREAM_READ);
	entry->hint_sample = gf_isom_hint_sample_new(entry->type);
	gf_isom_hint_sample_read(entry->hint_sample, bs, samp->dataLength);
	gf_bs_del(bs);
	entry->hint_sample->TransmissionTime = samp->DTS;
	gf_isom_sample_del(&samp);
	entry->hint_sample->sample_cache = gf_list_new();
	return GF_OK;
}
Example #20
// Update the dependancies when an OD AU is inserted in the file
GF_Err Media_ParseODFrame(GF_MediaBox *mdia, GF_ISOSample *sample, GF_ISOSample **od_samp)
{
	GF_TrackReferenceBox *tref;
	GF_TrackReferenceTypeBox *mpod;
	GF_Err e;
	GF_ODCom *com;
	GF_ODCodec *ODencode;
	GF_ODCodec *ODdecode;
	u32 i, j;
	//the commands we proceed
	GF_ESDUpdate *esdU, *esdU2;
	GF_ESDRemove *esdR, *esdR2;
	GF_ODUpdate *odU, *odU2;

	//the desc they contain
	GF_ObjectDescriptor *od;
	GF_IsomObjectDescriptor *isom_od;
	GF_ESD *esd;
	GF_ES_ID_Ref *ref;
	GF_Descriptor *desc;

	*od_samp = NULL;
	if (!mdia || !sample || !sample->data || !sample->dataLength) return GF_BAD_PARAM;

	//First find the references, and create them if none
	tref = mdia->mediaTrack->References;
	if (!tref) {
		tref = (GF_TrackReferenceBox *) gf_isom_box_new(GF_ISOM_BOX_TYPE_TREF);
		e = trak_AddBox((GF_Box*)mdia->mediaTrack, (GF_Box *) tref);
		if (e) return e;
	}
	//then find the OD reference, and create it if none
	e = Track_FindRef(mdia->mediaTrack, GF_ISOM_BOX_TYPE_MPOD, &mpod);
	if (e) return e;
	if (!mpod) {
		mpod = (GF_TrackReferenceTypeBox *) gf_isom_box_new(GF_ISOM_BOX_TYPE_REFT);
		mpod->reference_type = GF_ISOM_BOX_TYPE_MPOD;
		e = tref_AddBox((GF_Box*)tref, (GF_Box *)mpod);
		if (e) return e;
	}

	//OK, create our codecs
	ODencode = gf_odf_codec_new();
	if (!ODencode) return GF_OUT_OF_MEM;
	ODdecode = gf_odf_codec_new();
	if (!ODdecode) return GF_OUT_OF_MEM;

	e = gf_odf_codec_set_au(ODdecode, sample->data, sample->dataLength);
	if (e) goto err_exit;
	e = gf_odf_codec_decode(ODdecode);
	if (e) goto err_exit;

	while (1) {
		com = gf_odf_codec_get_com(ODdecode);
		if (!com) break;

		//check our commands
		switch (com->tag) {
		//Rewrite OD Update
		case GF_ODF_OD_UPDATE_TAG:
			//duplicate our command
			odU = (GF_ODUpdate *) com;
			odU2 = (GF_ODUpdate *) gf_odf_com_new(GF_ODF_OD_UPDATE_TAG);

			i=0;
			while ((desc = (GF_Descriptor*)gf_list_enum(odU->objectDescriptors, &i))) {
				//both OD and IODs are accepted
				switch (desc->tag) {
				case GF_ODF_OD_TAG:
				case GF_ODF_IOD_TAG:
					break;
				default:
					e = GF_ODF_INVALID_DESCRIPTOR;
					goto err_exit;
				}
				//get the esd
				e = gf_odf_desc_copy(desc, (GF_Descriptor **)&od);
				if (e) goto err_exit;
				if (desc->tag == GF_ODF_OD_TAG) {
					isom_od = (GF_IsomObjectDescriptor *) gf_malloc(sizeof(GF_IsomObjectDescriptor));
					isom_od->tag = GF_ODF_ISOM_OD_TAG;
				} else {
					isom_od = (GF_IsomObjectDescriptor *) gf_malloc(sizeof(GF_IsomInitialObjectDescriptor));
					isom_od->tag = GF_ODF_ISOM_IOD_TAG;
					//copy PL
					((GF_IsomInitialObjectDescriptor *)isom_od)->inlineProfileFlag = ((GF_InitialObjectDescriptor *)od)->inlineProfileFlag;
					((GF_IsomInitialObjectDescriptor *)isom_od)->graphics_profileAndLevel = ((GF_InitialObjectDescriptor *)od)->graphics_profileAndLevel;
					((GF_IsomInitialObjectDescriptor *)isom_od)->audio_profileAndLevel = ((GF_InitialObjectDescriptor *)od)->audio_profileAndLevel;
					((GF_IsomInitialObjectDescriptor *)isom_od)->OD_profileAndLevel = ((GF_InitialObjectDescriptor *)od)->OD_profileAndLevel;
					((GF_IsomInitialObjectDescriptor *)isom_od)->scene_profileAndLevel = ((GF_InitialObjectDescriptor *)od)->scene_profileAndLevel;
					((GF_IsomInitialObjectDescriptor *)isom_od)->visual_profileAndLevel = ((GF_InitialObjectDescriptor *)od)->visual_profileAndLevel;
					((GF_IsomInitialObjectDescriptor *)isom_od)->IPMPToolList = ((GF_InitialObjectDescriptor *)od)->IPMPToolList;
					((GF_InitialObjectDescriptor *)od)->IPMPToolList = NULL;
				}
				//in OD stream only ref desc are accepted
				isom_od->ES_ID_RefDescriptors = gf_list_new();
				isom_od->ES_ID_IncDescriptors = NULL;
	
				//TO DO: check that a given sampleDescription exists
				isom_od->extensionDescriptors = od->extensionDescriptors;
				od->extensionDescriptors = NULL;
				isom_od->IPMP_Descriptors = od->IPMP_Descriptors;
				od->IPMP_Descriptors = NULL;
				isom_od->OCIDescriptors = od->OCIDescriptors;
				od->OCIDescriptors = NULL;
				isom_od->URLString = od->URLString;
				od->URLString = NULL;
				isom_od->objectDescriptorID = od->objectDescriptorID;

				j=0;
				while ((esd = (GF_ESD*)gf_list_enum(od->ESDescriptors, &j))) {
					ref = (GF_ES_ID_Ref *) gf_odf_desc_new(GF_ODF_ESD_REF_TAG);
					//1 to 1 mapping trackID and ESID. Add this track to MPOD
					//if track does not exist, this will be remove while reading the OD stream
					e = reftype_AddRefTrack(mpod, esd->ESID, &ref->trackRef);
					e = gf_odf_desc_add_desc((GF_Descriptor *)isom_od, (GF_Descriptor *)ref);
					if (e) goto err_exit;
				}
				//delete our desc
				gf_odf_desc_del((GF_Descriptor *)od);
				//and add the new one to our command
				gf_list_add(odU2->objectDescriptors, isom_od);
			}
			//delete the command
			gf_odf_com_del((GF_ODCom **)&odU);
			//and add the new one to the codec
			gf_odf_codec_add_com(ODencode, (GF_ODCom *)odU2);
			break;

		//Rewrite ESD Update
		case GF_ODF_ESD_UPDATE_TAG:
			esdU = (GF_ESDUpdate *) com;
			esdU2 = (GF_ESDUpdate *) gf_odf_com_new(GF_ODF_ESD_UPDATE_TAG);
			esdU2->ODID = esdU->ODID;
			i=0;
			while ((esd = (GF_ESD*)gf_list_enum(esdU->ESDescriptors, &i))) {
				ref = (GF_ES_ID_Ref *) gf_odf_desc_new(GF_ODF_ESD_REF_TAG);
				//1 to 1 mapping trackID and ESID
				e = reftype_AddRefTrack(mpod, esd->ESID, &ref->trackRef);
				e = gf_list_add(esdU2->ESDescriptors, ref);
				if (e) goto err_exit;
			}
			gf_odf_com_del((GF_ODCom **)&esdU);
			gf_odf_codec_add_com(ODencode, (GF_ODCom *)esdU2);
			break;

		//Brand new case: the ESRemove has to be rewritten too according to the specs...
		case GF_ODF_ESD_REMOVE_TAG:
			esdR = (GF_ESDRemove *) com;
			esdR2 = (GF_ESDRemove *) gf_odf_com_new(GF_ODF_ESD_REMOVE_TAG);
			//change the tag for the file format
			esdR2->tag = GF_ODF_ESD_REMOVE_REF_TAG;
			esdR2->ODID = esdR->ODID;
			esdR2->NbESDs = esdR->NbESDs;
			if (esdR->NbESDs) {
				//alloc our stuff
				esdR2->ES_ID = (unsigned short*)gf_malloc(sizeof(u32) * esdR->NbESDs);
				if (!esdR2->ES_ID) {
					e = GF_OUT_OF_MEM;
					goto err_exit;
				}
				for (i = 0; i < esdR->NbESDs; i++) {
					//1 to 1 mapping trackID and ESID
					e = reftype_AddRefTrack(mpod, esdR->ES_ID[i], &esdR2->ES_ID[i]);
					if (e) goto err_exit;
				}
			}
			gf_odf_com_del(&com);
			gf_odf_codec_add_com(ODencode, (GF_ODCom *)esdR2);
			break;

		//Add the command as is
		default:
			e = gf_odf_codec_add_com(ODencode, com);
			if (e) goto err_exit;
		}
	}

	//encode our new AU
	e = gf_odf_codec_encode(ODencode, 1);
	if (e) goto err_exit;

	//and set the buffer in the sample
	*od_samp = gf_isom_sample_new();
	(*od_samp)->CTS_Offset = sample->CTS_Offset;
	(*od_samp)->DTS = sample->DTS;
	(*od_samp)->IsRAP = sample->IsRAP;
	e = gf_odf_codec_get_au(ODencode, & (*od_samp)->data, & (*od_samp)->dataLength);
	if (e) {
		gf_isom_sample_del(od_samp);
		*od_samp = NULL;
	}

err_exit:

	gf_odf_codec_del(ODencode);
	gf_odf_codec_del(ODdecode);
	return e;
}
Example #21
static GF_Err ISOW_Write(GF_StreamingCache *mc, LPNETCHANNEL ch, char *data, u32 data_size, GF_SLHeader *sl_hdr)
{
	ISOMChannel *mch;
	GF_ESD *esd;
	u32 di, mtype;
	u64 DTS, CTS;
	ISOMReader *cache = (ISOMReader *)mc->priv;
	if (!cache->mov || !cache->service) return GF_BAD_PARAM;

	mch = isor_get_channel(cache, ch);
	if (!mch) {
		Bool mapped;
		GF_NetworkCommand com;
		com.base.on_channel = ch;
		com.base.command_type = GF_NET_CHAN_GET_ESD;
		gf_service_command(cache->service, &com, GF_OK);
		if (!com.cache_esd.esd) return GF_SERVICE_ERROR;

		esd = (GF_ESD *)com.cache_esd.esd;
		switch (esd->decoderConfig->streamType) {
		case GF_STREAM_OD:
			mtype = GF_ISOM_MEDIA_OD;
			break;
		case GF_STREAM_SCENE:
			mtype = GF_ISOM_MEDIA_SCENE;
			break;
		case GF_STREAM_VISUAL:
			mtype = GF_ISOM_MEDIA_VISUAL;
			break;
		case GF_STREAM_AUDIO:
			mtype = GF_ISOM_MEDIA_AUDIO;
			break;
		case GF_STREAM_MPEG7:
			mtype = GF_ISOM_MEDIA_MPEG7;
			break;
		case GF_STREAM_OCI:
			mtype = GF_ISOM_MEDIA_OCI;
			break;
		case GF_STREAM_IPMP:
			mtype = GF_ISOM_MEDIA_IPMP;
			break;
		case GF_STREAM_MPEGJ:
			mtype = GF_ISOM_MEDIA_MPEGJ;
			break;
		case GF_STREAM_TEXT:
			mtype = GF_ISOM_MEDIA_TEXT;
			break;
		default:
			return GF_NOT_SUPPORTED;
		}
		GF_SAFEALLOC(mch, ISOMChannel);
		if (!mch) {
			return GF_OUT_OF_MEM;
		}
		mch->time_scale = esd->slConfig->timestampResolution;
		mch->streamType = esd->decoderConfig->streamType;
		mch->track = gf_isom_new_track(cache->mov, com.cache_esd.esd->ESID, mtype, mch->time_scale);
		mch->is_playing = GF_TRUE;
		mch->channel = ch;
		mch->owner = cache;
		gf_isom_set_track_enabled(cache->mov, mch->track, 1);
		/*translate 3GP streams to MP4*/
		mapped = GF_FALSE;
		if (esd->decoderConfig->objectTypeIndication==GPAC_OTI_MEDIA_GENERIC) {
			char szCode[5];
			strncpy(szCode, esd->decoderConfig->decoderSpecificInfo->data, 4);
			szCode[4]=0;
			if (!stricmp(szCode, "samr") || !stricmp(szCode, "amr ") || !stricmp(szCode, "sawb")) {
				GF_3GPConfig amrc;
				mapped = GF_TRUE;
				memset(&amrc, 0, sizeof(GF_3GPConfig));

				amrc.frames_per_sample = (u32) esd->decoderConfig->decoderSpecificInfo->data[13];
				amrc.type = (!stricmp(szCode, "sawb")) ? GF_ISOM_SUBTYPE_3GP_AMR_WB : GF_ISOM_SUBTYPE_3GP_AMR;
				amrc.vendor = GF_VENDOR_GPAC;
				gf_isom_3gp_config_new(cache->mov, mch->track, &amrc, NULL, NULL, &di);
			} else if (!stricmp(szCode, "h263")) {
				GF_3GPConfig h263c;
				memset(&h263c, 0, sizeof(GF_3GPConfig));
				h263c.type = GF_ISOM_SUBTYPE_3GP_H263;
				h263c.vendor = GF_VENDOR_GPAC;
				gf_isom_3gp_config_new(cache->mov, mch->track, &h263c, NULL, NULL, &di);
				mapped = GF_TRUE;
			}
		}
		if (!mapped) gf_isom_new_mpeg4_description(cache->mov, mch->track, esd, NULL, NULL, &di);
		if (com.cache_esd.is_iod_stream) gf_isom_add_track_to_root_od(cache->mov, mch->track);
		gf_list_add(cache->channels, mch);
	}

	/*first sample, cache it*/
	if (!mch->cache_sample) {
		mch->cache_seed_ts = sl_hdr->decodingTimeStamp;
		mch->cache_sample = gf_isom_sample_new();
		mch->cache_sample->IsRAP = sl_hdr->randomAccessPointFlag;
		mch->cache_sample->dataLength = data_size;
		mch->cache_sample->data = (char*)gf_malloc(sizeof(char)*data_size);
		memcpy(mch->cache_sample->data, data, sizeof(char)*data_size);
		return GF_OK;
	}

	/*adjust DTS/CTS*/
	DTS = sl_hdr->decodingTimeStamp - mch->cache_seed_ts;

	if ((mch->streamType==GF_STREAM_VISUAL) && (DTS<=mch->cache_sample->DTS)) {
		assert(DTS>mch->prev_dts);
		CTS = mch->cache_sample->DTS + mch->cache_sample->CTS_Offset;
		mch->cache_sample->CTS_Offset = 0;

		/*first time, shift all CTS*/
		if (!mch->frame_cts_offset) {
			u32 i, count = gf_isom_get_sample_count(cache->mov, mch->track);
			mch->frame_cts_offset = (u32) (DTS-mch->prev_dts);
			for (i=0; i<count; i++) {
				gf_isom_modify_cts_offset(cache->mov, mch->track, i+1, mch->frame_cts_offset);
			}
			mch->cache_sample->CTS_Offset += mch->frame_cts_offset;
		}
		mch->cache_sample->DTS = mch->prev_dts + mch->frame_cts_offset;
		mch->cache_sample->CTS_Offset += (u32) (CTS-mch->cache_sample->DTS);
	}
	/*deal with reference picture insertion: if no CTS offset and biggest CTS until now, this is
	a reference insertion - we must check that in order to make sure we have strictly increasing DTSs*/
	if (mch->max_cts && !mch->cache_sample->CTS_Offset && (mch->cache_sample->DTS+mch->cache_sample->CTS_Offset > mch->max_cts)) {
		assert(mch->cache_sample->DTS > mch->prev_dts + mch->frame_cts_offset);
		CTS = mch->cache_sample->DTS + mch->cache_sample->CTS_Offset;
		mch->cache_sample->DTS = mch->prev_dts + mch->frame_cts_offset;
		mch->cache_sample->CTS_Offset = (u32) (CTS-mch->cache_sample->DTS);
	}
	if (mch->cache_sample->CTS_Offset)
		mch->max_cts = mch->cache_sample->DTS+mch->cache_sample->CTS_Offset;

	/*add cache*/
	gf_isom_add_sample(cache->mov, mch->track, 1, mch->cache_sample);
	assert(!mch->prev_dts || (mch->prev_dts < mch->cache_sample->DTS));
	mch->prev_dts = mch->cache_sample->DTS;
	mch->duration = MAX(mch->max_cts, mch->prev_dts);
	gf_isom_sample_del(&mch->cache_sample);

	/*store sample*/
	mch->cache_sample = gf_isom_sample_new();
	mch->cache_sample->IsRAP = sl_hdr->randomAccessPointFlag;
	mch->cache_sample->DTS = DTS + mch->frame_cts_offset;
	mch->cache_sample->CTS_Offset = (u32) (sl_hdr->compositionTimeStamp - mch->cache_seed_ts - DTS);
	mch->cache_sample->dataLength = data_size;
	mch->cache_sample->data = (char*)gf_malloc(sizeof(char)*data_size);
	memcpy(mch->cache_sample->data, data, sizeof(char)*data_size);
	return GF_OK;
}
Example #22
static GF_Err gf_sm_load_run_isom(GF_SceneLoader *load)
{
	GF_Err e;
	FILE *logs;
	u32 i, j, di, nbBifs, nbLaser, nb_samp, samp_done, init_offset;
	GF_StreamContext *sc;
	GF_ESD *esd;
	GF_ODCodec *od_dec;
#ifndef GPAC_DISABLE_BIFS
	GF_BifsDecoder *bifs_dec;
#endif
#ifndef GPAC_DISABLE_LASER
	GF_LASeRCodec *lsr_dec;
#endif

	if (!load || !load->isom) return GF_BAD_PARAM;

	nbBifs = nbLaser = 0;
	e = GF_OK;
#ifndef GPAC_DISABLE_BIFS
	bifs_dec = gf_bifs_decoder_new(load->scene_graph, 1);
	gf_bifs_decoder_set_extraction_path(bifs_dec, load->localPath, load->fileName);
#endif
	od_dec = gf_odf_codec_new();
	logs = NULL;
#ifndef GPAC_DISABLE_LASER
	lsr_dec = gf_laser_decoder_new(load->scene_graph);
#endif
	esd = NULL;
	/*load each stream*/
	nb_samp = 0;
	for (i=0; i<gf_isom_get_track_count(load->isom); i++) {
		u32 type = gf_isom_get_media_type(load->isom, i+1);
		switch (type) {
		case GF_ISOM_MEDIA_SCENE:
		case GF_ISOM_MEDIA_OD:
			nb_samp += gf_isom_get_sample_count(load->isom, i+1);
			break;
		default:
			break;
		}
	}
	samp_done = 1;
	gf_isom_text_set_streaming_mode(load->isom, 1);

	for (i=0; i<gf_isom_get_track_count(load->isom); i++) {
		u32 type = gf_isom_get_media_type(load->isom, i+1);
		switch (type) {
		case GF_ISOM_MEDIA_SCENE:
		case GF_ISOM_MEDIA_OD:
			break;
		default:
			continue;
		}
		esd = gf_isom_get_esd(load->isom, i+1, 1);
		if (!esd) continue;


		if ((esd->decoderConfig->objectTypeIndication == GPAC_OTI_SCENE_AFX) ||
		        (esd->decoderConfig->objectTypeIndication == GPAC_OTI_SCENE_SYNTHESIZED_TEXTURE)
		   ) {
			nb_samp += gf_isom_get_sample_count(load->isom, i+1);
			continue;
		}
		sc = gf_sm_stream_new(load->ctx, esd->ESID, esd->decoderConfig->streamType, esd->decoderConfig->objectTypeIndication);
		sc->streamType = esd->decoderConfig->streamType;
		sc->ESID = esd->ESID;
		sc->objectType = esd->decoderConfig->objectTypeIndication;
		sc->timeScale = gf_isom_get_media_timescale(load->isom, i+1);

		/*we still need to reconfig the BIFS*/
		if (esd->decoderConfig->streamType==GF_STREAM_SCENE) {
#ifndef GPAC_DISABLE_BIFS
			/*BIFS*/
			if (esd->decoderConfig->objectTypeIndication<=2) {
				if (!esd->dependsOnESID && nbBifs && !i)
					mp4_report(load, GF_OK, "several scene namespaces used or improper scene dependencies in file - import may be incorrect");
				if (!esd->decoderConfig->decoderSpecificInfo) {
					/* Hack for T-DMB non compliant streams */
					e = gf_bifs_decoder_configure_stream(bifs_dec, esd->ESID, NULL, 0, esd->decoderConfig->objectTypeIndication);
				} else {
					e = gf_bifs_decoder_configure_stream(bifs_dec, esd->ESID, esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, esd->decoderConfig->objectTypeIndication);
				}
				if (e) goto exit;
				nbBifs++;
			}
#endif

#ifndef GPAC_DISABLE_LASER
			/*LASER*/
			if (esd->decoderConfig->objectTypeIndication==0x09) {
				if (!esd->dependsOnESID && nbBifs && !i)
					mp4_report(load, GF_OK, "several scene namespaces used or improper scene dependencies in file - import may be incorrect");
				e = gf_laser_decoder_configure_stream(lsr_dec, esd->ESID, esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength);
				if (e) goto exit;
				nbLaser++;
			}
#endif
		}

		init_offset = 0;
		/*dump all AUs*/
		for (j=0; j<gf_isom_get_sample_count(load->isom, i+1); j++) {
			GF_AUContext *au;
			GF_ISOSample *samp = gf_isom_get_sample(load->isom, i+1, j+1, &di);
			if (!samp) {
				mp4_report(load, gf_isom_last_error(load->isom), "Unable to fetch sample %d from track ID %d - aborting track import", j+1, gf_isom_get_track_id(load->isom, i+1));
				break;
			}
			/*check if track has initial offset*/
			if (!j && gf_isom_get_edit_segment_count(load->isom, i+1)) {
				u64 EditTime, dur, mtime;
				u8 mode;
				gf_isom_get_edit_segment(load->isom, i+1, 1, &EditTime, &dur, &mtime, &mode);
				if (mode==GF_ISOM_EDIT_EMPTY) {
					init_offset = (u32) (dur * sc->timeScale / gf_isom_get_timescale(load->isom) );
				}
			}
			samp->DTS += init_offset;

			au = gf_sm_stream_au_new(sc, samp->DTS, ((Double)(s64) samp->DTS) / sc->timeScale, (samp->IsRAP==RAP) ? 1 : 0);

			if (esd->decoderConfig->streamType==GF_STREAM_SCENE) {
#ifndef GPAC_DISABLE_BIFS
				if (esd->decoderConfig->objectTypeIndication<=2)
					e = gf_bifs_decode_command_list(bifs_dec, esd->ESID, samp->data, samp->dataLength, au->commands);
#endif
#ifndef GPAC_DISABLE_LASER
				if (esd->decoderConfig->objectTypeIndication==0x09)
					e = gf_laser_decode_command_list(lsr_dec, esd->ESID, samp->data, samp->dataLength, au->commands);
#endif
			} else {
				e = gf_odf_codec_set_au(od_dec, samp->data, samp->dataLength);
				if (!e) e = gf_odf_codec_decode(od_dec);
				if (!e) {
					while (1) {
						GF_ODCom *odc = gf_odf_codec_get_com(od_dec);
						if (!odc) break;
						/*update ESDs if any*/
						UpdateODCommand(load->isom, odc);
						gf_list_add(au->commands, odc);
					}
				}
			}
			gf_isom_sample_del(&samp);
			if (e) {
				mp4_report(load, gf_isom_last_error(load->isom), "decoding sample %d from track ID %d failed", j+1, gf_isom_get_track_id(load->isom, i+1));
				goto exit;
			}

			samp_done++;
			gf_set_progress("MP4 Loading", samp_done, nb_samp);
		}
		gf_odf_desc_del((GF_Descriptor *) esd);
		esd = NULL;
	}
	gf_isom_text_set_streaming_mode(load->isom, 0);

exit:
#ifndef GPAC_DISABLE_BIFS
	gf_bifs_decoder_del(bifs_dec);
#endif
	gf_odf_codec_del(od_dec);
#ifndef GPAC_DISABLE_LASER
	gf_laser_decoder_del(lsr_dec);
#endif
	if (esd) gf_odf_desc_del((GF_Descriptor *) esd);
	if (logs) gf_fclose(logs);
	return e;
}
Example #23
GF_EXPORT
GF_Err gf_hinter_track_process(GF_RTPHinter *tkHint)
{
	GF_Err e;
	u32 i, descIndex, duration;
	u64 ts;
	u8 PadBits;
	Double ft;
	GF_ISOSample *samp;

	tkHint->HintSample = tkHint->RTPTime = 0;

	tkHint->TotalSample = gf_isom_get_sample_count(tkHint->file, tkHint->TrackNum);
	ft = tkHint->rtp_p->sl_config.timestampResolution;
	ft /= tkHint->OrigTimeScale;
	
	e = GF_OK;
	for (i=0; i<tkHint->TotalSample; i++) {
		samp = gf_isom_get_sample(tkHint->file, tkHint->TrackNum, i+1, &descIndex);
		if (!samp) return GF_IO_ERR;

		//setup SL
		tkHint->CurrentSample = i + 1;

		/*keep same AU indicator if sync shadow - TODO FIXME: this assumes shadows are placed interleaved with 
		the track content which is the case for GPAC scene carousel generation, but may not always be true*/
		if (samp->IsRAP==2) {
			tkHint->rtp_p->sl_header.AU_sequenceNumber -= 1;
			samp->IsRAP = 1;
		}

		ts = (u64) (ft * (s64) (samp->DTS+samp->CTS_Offset));
		tkHint->rtp_p->sl_header.compositionTimeStamp = ts;

		ts = (u64) (ft * (s64)(samp->DTS));
		tkHint->rtp_p->sl_header.decodingTimeStamp = ts;
		tkHint->rtp_p->sl_header.randomAccessPointFlag = samp->IsRAP;

		tkHint->base_offset_in_sample = 0;
		/*crypted*/
		if (tkHint->rtp_p->slMap.IV_length) {
			GF_ISMASample *s = gf_isom_get_ismacryp_sample(tkHint->file, tkHint->TrackNum, samp, descIndex);
			/*one byte take for selective_enc flag*/
			if (s->flags & GF_ISOM_ISMA_USE_SEL_ENC) tkHint->base_offset_in_sample += 1;
			if (s->flags & GF_ISOM_ISMA_IS_ENCRYPTED) tkHint->base_offset_in_sample += s->IV_length + s->KI_length;
			gf_free(samp->data);
			samp->data = s->data;
			samp->dataLength = s->dataLength;
			gp_rtp_builder_set_cryp_info(tkHint->rtp_p, s->IV, (char*)s->key_indicator, (s->flags & GF_ISOM_ISMA_IS_ENCRYPTED) ? 1 : 0);
			s->data = NULL;
			s->dataLength = 0;
			gf_isom_ismacryp_delete_sample(s);
		}

		if (tkHint->rtp_p->sl_config.usePaddingFlag) {
			gf_isom_get_sample_padding_bits(tkHint->file, tkHint->TrackNum, i+1, &PadBits);
			tkHint->rtp_p->sl_header.paddingBits = PadBits;
		} else {
			tkHint->rtp_p->sl_header.paddingBits = 0;
		}
		
		duration = gf_isom_get_sample_duration(tkHint->file, tkHint->TrackNum, i+1);
		ts = (u32) (ft * (s64) (duration));

		/*unpack nal units*/
		if (tkHint->avc_nalu_size) {
			u32 v, size;
			u32 remain = samp->dataLength;
			char *ptr = samp->data;

			tkHint->rtp_p->sl_header.accessUnitStartFlag = 1;
			tkHint->rtp_p->sl_header.accessUnitEndFlag = 0;
			while (remain) {
				size = 0;
				v = tkHint->avc_nalu_size;
				while (v) {
					size |= (u8) *ptr;
					ptr++;
					remain--;
					v-=1;
					if (v) size<<=8;
				}
				tkHint->base_offset_in_sample = samp->dataLength-remain;
				remain -= size;
				tkHint->rtp_p->sl_header.accessUnitEndFlag = remain ? 0 : 1;
				e = gf_rtp_builder_process(tkHint->rtp_p, ptr, size, (u8) !remain, samp->dataLength, duration, (u8) (descIndex + GF_RTP_TX3G_SIDX_OFFSET) );
				ptr += size;
				tkHint->rtp_p->sl_header.accessUnitStartFlag = 0;
			}
		} else {
			e = gf_rtp_builder_process(tkHint->rtp_p, samp->data, samp->dataLength, 1, samp->dataLength, duration, (u8) (descIndex + GF_RTP_TX3G_SIDX_OFFSET) );
		}
		tkHint->rtp_p->sl_header.packetSequenceNumber += 1;

		//signal some progress
		gf_set_progress("Hinting", tkHint->CurrentSample, tkHint->TotalSample);

		tkHint->rtp_p->sl_header.AU_sequenceNumber += 1;
		gf_isom_sample_del(&samp);

		if (e) return e;
	}

	//flush
	gf_rtp_builder_process(tkHint->rtp_p, NULL, 0, 1, 0, 0, 0);

	gf_isom_end_hint_sample(tkHint->file, tkHint->HintTrack, (u8) tkHint->SampleIsRAP);
	return GF_OK;
}
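The NAL-unit unpacking loop in gf_hinter_track_process() rebuilds each unit's size from a big-endian length prefix of avc_nalu_size bytes; a worked illustration with hypothetical bytes:

/* With avc_nalu_size == 4 and sample data starting 00 00 01 0A ..., the inner loop
   accumulates size = 0x0000010A = 266, so the next 266 bytes are packetized as one
   NAL unit before the following 4-byte length prefix is read. */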
Example #24
GF_EXPORT
GF_Err gf_isom_streamer_send_next_packet(GF_ISOMRTPStreamer *streamer, s32 send_ahead_delay, s32 max_sleep_time) 
{
	GF_Err e = GF_OK;
	GF_RTPTrack *track, *to_send;
	u32 time, duration;
	s32 diff;
	u64 min_ts, dts, cts;

	if (!streamer) return GF_BAD_PARAM;
	
	/*browse all sessions and locate most mature stream*/
	to_send = NULL;
	min_ts = (u64) -1;

	time = gf_sys_clock();

	/*init session timeline - all sessions are sync'ed for packet scheduling purposes*/
	if (!streamer->timelineOrigin)
		streamer->timelineOrigin = time*1000;

	track = streamer->stream;
	while (track) {
		/*load next AU*/
		if (!track->au) {
			if (track->current_au >= track->nb_aus) {
				Double scale;
				if (!streamer->loop) {
					track = track->next;
					continue;
				}
				/*increment ts offset*/
				scale = track->timescale/1000.0;
				track->ts_offset += (u32) (streamer->duration_ms * scale);
				track->microsec_ts_offset = (u32) (track->ts_offset*(1000000.0/track->timescale)) + streamer->timelineOrigin;
				track->current_au = 0;
			}

			track->au = gf_isom_get_sample(streamer->isom, track->track_num, track->current_au + 1, &track->sample_desc_index);
			track->current_au ++;
			if (track->au) {
				track->microsec_dts = (u64) (track->microsec_ts_scale * (s64) (track->au->DTS)) + track->microsec_ts_offset + streamer->timelineOrigin;
			}
		}

		/*check timing*/
		if (track->au) {
			if (min_ts > track->microsec_dts) {
				min_ts = track->microsec_dts;
				to_send = track;
			}
		}

		track = track->next;
	}

	/*no input data ...*/
	if( !to_send) return GF_EOS;
	min_ts /= 1000;

	if (max_sleep_time) {
		diff = ((u32) min_ts) - gf_sys_clock();	
		if (diff>max_sleep_time) 
			return GF_OK;
	}

	/*sleep until TS is mature*/
	while (1) {
		diff = ((u32) min_ts) - gf_sys_clock();
		
		if (diff > send_ahead_delay) {
			gf_sleep(1);
		} else {
			if (diff<10) {
				GF_LOG(GF_LOG_DEBUG, GF_LOG_RTP, ("WARNING: RTP session %s stream %d - sending packet %d ms too late\n", gf_isom_get_filename(streamer->isom), to_send->track_num, -diff));
			}
			break;
		}
	}

	/*send packets*/

	dts = to_send->au->DTS + to_send->ts_offset;
	cts = to_send->au->DTS + to_send->au->CTS_Offset + to_send->ts_offset;
	duration = gf_isom_get_sample_duration(streamer->isom, to_send->track_num, to_send->current_au);

	/*unpack nal units*/
	if (to_send->avc_nalu_size) {
		Bool au_start, au_end;
		u32 v, size;
		u32 remain = to_send->au->dataLength;
		char *ptr = to_send->au->data;

		au_start = 1;
		au_end = 0;
		while (remain) {
			size = 0;
			v = to_send->avc_nalu_size;
			while (v) {
				size |= (u8) *ptr;
				ptr++;
				remain--;
				v-=1;
				if (v) size<<=8;
			}
			remain -= size;
			au_end = remain ? 0 : 1;

			e = gf_rtp_streamer_send_data(to_send->rtp, ptr, size, to_send->au->dataLength, cts, dts, to_send->au->IsRAP, au_start, au_end, to_send->current_au, duration, to_send->sample_desc_index);
			ptr += size;
			au_start = 0;
		}
	} else {
		e = gf_rtp_streamer_send_data(to_send->rtp, to_send->au->data, to_send->au->dataLength, to_send->au->dataLength, cts, dts, to_send->au->IsRAP, 1, 1, to_send->current_au, duration, to_send->sample_desc_index);
	}
	/*delete sample*/
	gf_isom_sample_del(&to_send->au);

	return e;
}
Example #25
/*import cubic QTVR to mp4*/
GF_Err gf_sm_load_init_qt(GF_SceneLoader *load)
{
	u32 i, di, w, h, tk, nb_samp;
	Bool has_qtvr;
	GF_ISOSample *samp;
	GF_ISOFile *src;
	GF_StreamContext *st;
	GF_AUContext *au;
	GF_Command *com;
	M_Background *back;
	M_NavigationInfo *ni;
	M_Group *gr;
	GF_ODUpdate *odU;
	GF_SceneGraph *sg;
	GF_ObjectDescriptor *od;
	GF_ESD *esd;

	if (!load->ctx) return GF_NOT_SUPPORTED;

	src = gf_isom_open(load->fileName, GF_ISOM_OPEN_READ, NULL);
	if (!src) return gf_qt_report(load, GF_URL_ERROR, "Opening file %s failed", load->fileName);

	w = h = tk = 0;
	nb_samp = 0;

	has_qtvr = 0;
	for (i=0; i<gf_isom_get_track_count(src); i++) {
		switch (gf_isom_get_media_type(src, i+1)) {
		case GF_ISOM_MEDIA_VISUAL:
			if (gf_isom_get_media_subtype(src, i+1, 1) == GF_4CC('j', 'p', 'e', 'g')) {
				GF_GenericSampleDescription *udesc = gf_isom_get_generic_sample_description(src, i+1, 1);
				if ((udesc->width>w) || (udesc->height>h)) {
					w = udesc->width;
					h = udesc->height;
					tk = i+1;
					nb_samp = gf_isom_get_sample_count(src, i+1);
				}
				if (udesc->extension_buf) gf_free(udesc->extension_buf);
				gf_free(udesc);
			}
			break;
		case GF_4CC('q','t','v','r'):
			has_qtvr = 1;
			break;
		}
	}
	if (!has_qtvr) {
		gf_isom_delete(src);
		return gf_qt_report(load, GF_NOT_SUPPORTED, "QTVR not found - no conversion available for this QuickTime movie");
	}
	if (!tk) {
		gf_isom_delete(src);
		return gf_qt_report(load, GF_NON_COMPLIANT_BITSTREAM, "No associated visual track with QTVR movie");
	}
	if (nb_samp!=6) {
		gf_isom_delete(src);
		return gf_qt_report(load, GF_NOT_SUPPORTED, "Movie %s doesn't look a Cubic QTVR - sorry...", load->fileName);
	}

	GF_LOG(GF_LOG_INFO, GF_LOG_PARSER, ("QT: Importing Cubic QTVR Movie"));

	/*create scene*/
	sg = load->ctx->scene_graph;
	gr = (M_Group *) gf_node_new(sg, TAG_MPEG4_Group);
	gf_node_register((GF_Node *)gr, NULL);
	st = gf_sm_stream_new(load->ctx, 1, GF_STREAM_SCENE, 1);
	au = gf_sm_stream_au_new(st, 0, 0, 1);
	com = gf_sg_command_new(load->ctx->scene_graph, GF_SG_SCENE_REPLACE);
	gf_list_add(au->commands, com);
	com->node = (GF_Node *)gr;

	back = (M_Background *) gf_node_new(sg, TAG_MPEG4_Background);
	gf_node_list_add_child( &gr->children, (GF_Node*)back);
	gf_node_register((GF_Node *)back, (GF_Node *)gr);

	gf_sg_vrml_mf_alloc(&back->leftUrl, GF_SG_VRML_MFURL, 1);
	back->leftUrl.vals[0].OD_ID = 2;
	gf_sg_vrml_mf_alloc(&back->frontUrl, GF_SG_VRML_MFURL, 1);
	back->frontUrl.vals[0].OD_ID = 3;
	gf_sg_vrml_mf_alloc(&back->rightUrl, GF_SG_VRML_MFURL, 1);
	back->rightUrl.vals[0].OD_ID = 4;
	gf_sg_vrml_mf_alloc(&back->backUrl, GF_SG_VRML_MFURL, 1);
	back->backUrl.vals[0].OD_ID = 5;
	gf_sg_vrml_mf_alloc(&back->topUrl, GF_SG_VRML_MFURL, 1);
	back->topUrl.vals[0].OD_ID = 6;
	gf_sg_vrml_mf_alloc(&back->bottomUrl, GF_SG_VRML_MFURL, 1);
	back->bottomUrl.vals[0].OD_ID = 7;

	ni = (M_NavigationInfo *) gf_node_new(sg, TAG_MPEG4_NavigationInfo);
	gf_node_list_add_child(&gr->children, (GF_Node*)ni);
	gf_node_register((GF_Node *)ni, (GF_Node *)gr);
	gf_sg_vrml_mf_reset(&ni->type, GF_SG_VRML_MFSTRING);
	gf_sg_vrml_mf_alloc(&ni->type, GF_SG_VRML_MFSTRING, 1);
	ni->type.vals[0] = gf_strdup("QTVR");

	/*create ODs*/
	st = gf_sm_stream_new(load->ctx, 2, GF_STREAM_OD, 1);
	au = gf_sm_stream_au_new(st, 0, 0, 1);
	odU = (GF_ODUpdate*) gf_odf_com_new(GF_ODF_OD_UPDATE_TAG);
	gf_list_add(au->commands, odU);
	for (i=0; i<6; i++) {
		GF_MuxInfo *mi;
		FILE *img;
		char szName[1024];
		od = (GF_ObjectDescriptor *) gf_odf_desc_new(GF_ODF_OD_TAG);
		od->objectDescriptorID = 2+i;
		esd = gf_odf_desc_esd_new(2);
		esd->decoderConfig->streamType = GF_STREAM_VISUAL;
		esd->decoderConfig->objectTypeIndication = GPAC_OTI_IMAGE_JPEG;
		esd->ESID = 3+i;
		/*extract image and remember it*/
		mi = (GF_MuxInfo *) gf_odf_desc_new(GF_ODF_MUXINFO_TAG);
		gf_list_add(esd->extensionDescriptors, mi);
		mi->delete_file = 1;
		sprintf(szName, "%s_img%d.jpg", load->fileName, esd->ESID);
		mi->file_name = gf_strdup(szName);
		
		gf_list_add(od->ESDescriptors, esd);
		gf_list_add(odU->objectDescriptors, od);

		samp = gf_isom_get_sample(src, tk, i+1, &di);
		img = gf_f64_open(mi->file_name, "wb");
		fwrite(samp->data, samp->dataLength, 1, img);
		fclose(img);
		gf_isom_sample_del(&samp);
	}
	gf_isom_delete(src);
	return GF_OK;
}
Example #26
File: main.c  Project: Bevara/GPAC
void bifs_to_vid(GF_ISOFile *file, char *szConfigFile, u32 width, u32 height, char *rad_name, u32 dump_type, char *out_dir, Double fps, s32 frameID, s32 dump_time)
{
	GF_User user;
	BIFSVID b2v;
	u16 es_id;
	Bool first_dump, needs_raw;
	u32 i, j, di, count, timescale, frameNum;
	u32 duration, cur_time;
	GF_VideoSurface fb;
	GF_Err e;
	char old_driv[1024];
	const char *test;
	char config_path[GF_MAX_PATH];
	avi_t *avi_out;
	Bool reset_fps;
	GF_ESD *esd;
	char comp[5];
	char *conv_buf;

	memset(&user, 0, sizeof(GF_User));
	if (szConfigFile && strlen(szConfigFile)) {
		strcpy(config_path, szConfigFile);
		user.config = gf_cfg_init(config_path, NULL);
	} else {
		user.config = gf_cfg_init(NULL, NULL);
	}

	if (!user.config) {
		fprintf(stdout, "Error: Configuration File \"%s\" not found in %s\n", GPAC_CFG_FILE, config_path);
		return;
	}
	avi_out = NULL;
	conv_buf = NULL;
	esd = NULL;
	needs_raw = 0;
	test = gf_cfg_get_key(user.config, "General", "ModulesDirectory");
	user.modules = gf_modules_new((const unsigned char *) test, user.config);
	strcpy(old_driv, "raw_out");
	if (!gf_modules_get_count(user.modules)) {
		printf("Error: no modules found\n");
		goto err_exit;
	}

	/*switch driver to raw_driver*/
	test = gf_cfg_get_key(user.config, "Video", "DriverName");
	if (test) strcpy(old_driv, test);

	test = gf_cfg_get_key(user.config, "Compositor", "RendererName");
	/*since we only support RGB24 for MP42AVI force using RAW out with 2D driver*/
	if (test && strstr(test, "2D")) {
		gf_cfg_set_key(user.config, "Video", "DriverName", "Raw Video Output");
		needs_raw = 1;
	}

	needs_raw = 0;
	user.init_flags = GF_TERM_NO_AUDIO | GF_TERM_FORCE_3D;
	b2v.sr = gf_sc_new(&user, 0, NULL);
	gf_sc_set_option(b2v.sr, GF_OPT_VISIBLE, 0);

	b2v.sg = gf_sg_new();
	gf_sg_set_scene_time_callback(b2v.sg, get_scene_time, &b2v);
	gf_sg_set_init_callback(b2v.sg, node_init, &b2v);
	gf_sg_set_modified_callback(b2v.sg, node_modif, &b2v);

	/*load config*/
	gf_sc_set_option(b2v.sr, GF_OPT_RELOAD_CONFIG, 1);

	b2v.bifs = gf_bifs_decoder_new(b2v.sg, 0);

	if (needs_raw) {
		test = gf_cfg_get_key(user.config, "Video", "DriverName");
		if (stricmp(test, "raw_out") && stricmp(test, "Raw Video Output")) {
			printf("couldn't load raw output driver (%s used)\n", test);
			goto err_exit;
		}
	}

	strcpy(config_path, "");
	if (out_dir) {
		strcat(config_path, out_dir);
		if (config_path[strlen(config_path)-1] != '\\') strcat(config_path, "\\");
	}
	strcat(config_path, rad_name);
	strcat(config_path, "_bifs");
	if (!dump_type) {
		strcat(config_path, ".avi");
		avi_out = AVI_open_output_file(config_path);
		comp[0] = comp[1] = comp[2] = comp[3] = comp[4] = 0;
		if (!avi_out) goto err_exit;
	}


	for (i=0; i<gf_isom_get_track_count(file); i++) {
		esd = gf_isom_get_esd(file, i+1, 1);
		if (!esd) continue;
		if (!esd->dependsOnESID && (esd->decoderConfig->streamType == GF_STREAM_SCENE)) break;
		gf_odf_desc_del((GF_Descriptor *) esd);
		esd = NULL;
	}
	if (!esd) {
		printf("no bifs track found\n");
		goto err_exit;
	}

	b2v.duration = gf_isom_get_media_duration(file, i+1);
	timescale = gf_isom_get_media_timescale(file, i+1);
	es_id = (u16) gf_isom_get_track_id(file, i+1);
	e = gf_bifs_decoder_configure_stream(b2v.bifs, es_id, esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, esd->decoderConfig->objectTypeIndication);
	if (e) {
		printf("BIFS init error %s\n", gf_error_to_string(e));
		gf_odf_desc_del((GF_Descriptor *) esd);
		esd = NULL;
		goto err_exit;
	}
	if (dump_time>=0) dump_time = dump_time *1000 / timescale;

	gf_sc_set_scene(b2v.sr, b2v.sg);
	count = gf_isom_get_sample_count(file, i+1);

	reset_fps = 0;
	if (!fps) {
		fps = (Float) (count * timescale);
		fps /= (Double) (s64) b2v.duration;
		printf("Estimated BIFS FrameRate %g\n", fps);
		reset_fps = 1;
	}

	if (!width || !height) {
		gf_sg_get_scene_size_info(b2v.sg, &width, &height);
	}
	/*we work in RGB24, and we must make sure the pitch is %4*/
	if ((width*3)%4) {
		printf("Adjusting width (%d) to have a stride multiple of 4\n", width);
		while ((width*3)%4) width--;
	}

	gf_sc_set_size(b2v.sr, width, height);
	gf_sc_draw_frame(b2v.sr);

	gf_sc_get_screen_buffer(b2v.sr, &fb);
	width = fb.width;
	height = fb.height;
	if (avi_out) {
		AVI_set_video(avi_out, width, height, fps, comp);
		conv_buf = gf_malloc(sizeof(char) * width * height * 3);
	}
	printf("Dumping at BIFS resolution %d x %d\n\n", width, height);
	gf_sc_release_screen_buffer(b2v.sr, &fb);

	cur_time = 0;

	duration = (u32)(timescale / fps);
	if (reset_fps) fps = 0;

	frameNum = 1;
	first_dump = 1;
	for (j=0; j<count; j++) {
		GF_ISOSample *samp = gf_isom_get_sample(file, i+1, j+1, &di);

		b2v.cts = samp->DTS + samp->CTS_Offset;
		/*apply command*/
		gf_bifs_decode_au(b2v.bifs, es_id, samp->data, samp->dataLength, ((Double)(s64)b2v.cts)/1000.0);
		gf_isom_sample_del(&samp);

		if ((frameID>=0) && (j<(u32)frameID)) continue;
		if ((dump_time>=0) && ((u32) dump_time>b2v.cts)) continue;
		/*render frame*/
		gf_sc_draw_frame(b2v.sr);
		/*needed for background2D !!*/
		if (first_dump) {
			gf_sc_draw_frame(b2v.sr);
			first_dump = 0;
		}

		if (fps) {
			if (cur_time > b2v.cts) continue;

			while (1) {
				printf("dumped frame time %f (frame %d - sample %d)\r", ((Float)cur_time)/timescale, frameNum, j+1);
				dump_frame(b2v, conv_buf, config_path, dump_type, avi_out, frameNum);
				frameNum++;
				cur_time += duration;
				if (cur_time > b2v.cts) break;
			}
		} else {
			dump_frame(b2v, conv_buf, config_path, dump_type, avi_out, (frameID>=0) ? frameID : frameNum);
			if (frameID>=0 || dump_time>=0) break;
			frameNum++;
			printf("dumped frame %d / %d\r", j+1, count);
		}

	}
	gf_odf_desc_del((GF_Descriptor *) esd);

	/*destroy everything*/
	gf_bifs_decoder_del(b2v.bifs);
	gf_sg_del(b2v.sg);
	gf_sc_set_scene(b2v.sr, NULL);
	gf_sc_del(b2v.sr);

err_exit:
	if (avi_out) AVI_close(avi_out);
	if (conv_buf) gf_free(conv_buf);
	if (user.modules) gf_modules_del(user.modules);
	if (needs_raw) gf_cfg_set_key(user.config, "Video", "DriverName", old_driv);
	gf_cfg_del(user.config);
}
Example #27
GF_EXPORT
GF_RTPHinter *gf_hinter_track_new(GF_ISOFile *file, u32 TrackNum, 
							u32 Path_MTU, u32 max_ptime, u32 default_rtp_rate, u32 flags, u8 PayloadID, 
							Bool copy_media, u32 InterleaveGroupID, u8 InterleaveGroupPriority, GF_Err *e)
{

	GF_SLConfig my_sl;
	u32 descIndex, MinSize, MaxSize, avgTS, streamType, oti, const_dur, nb_ch, maxDTSDelta;
	u8 OfficialPayloadID;
	u32 TrackMediaSubType, TrackMediaType, hintType, nbEdts, required_rate, force_dts_delta, avc_nalu_size, PL_ID, bandwidth, IV_length, KI_length;
	const char *url, *urn;
	char *mpeg4mode;
	Bool is_crypted, has_mpeg4_mapping;
	GF_RTPHinter *tmp;
	GF_ESD *esd;

	*e = GF_BAD_PARAM;
	if (!file || !TrackNum || !gf_isom_get_track_id(file, TrackNum)) return NULL;

	if (!gf_isom_get_sample_count(file, TrackNum)) {
		*e = GF_OK;
		return NULL;
	}
	*e = GF_NOT_SUPPORTED;
	nbEdts = gf_isom_get_edit_segment_count(file, TrackNum);
	if (nbEdts>1) {
		u64 et, sd, mt;
		u8 em;
		gf_isom_get_edit_segment(file, TrackNum, 1, &et, &sd, &mt, &em);
		if ((nbEdts>2) || (em!=GF_ISOM_EDIT_EMPTY)) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_RTP, ("[rtp hinter] Cannot hint track with EditList\n"));
			return NULL;
		}
	}
	if (nbEdts) gf_isom_remove_edit_segments(file, TrackNum);

	if (!gf_isom_is_track_enabled(file, TrackNum)) return NULL;

	/*by default NO PL signaled*/
	PL_ID = 0;
	OfficialPayloadID = 0;
	force_dts_delta = 0;
	streamType = oti = 0;
	mpeg4mode = NULL;
	required_rate = 0;
	is_crypted = 0;
	IV_length = KI_length = 0;
	oti = 0;
	nb_ch = 0;
	avc_nalu_size = 0;
	has_mpeg4_mapping = 1;
	TrackMediaType = gf_isom_get_media_type(file, TrackNum);
	TrackMediaSubType = gf_isom_get_media_subtype(file, TrackNum, 1);
	
	/*for max compatibility with QT*/
	if (!default_rtp_rate) default_rtp_rate = 90000;

	/*timed-text is a bit special, we support multiple stream descriptions & co*/
	if ( (TrackMediaType==GF_ISOM_MEDIA_TEXT) || (TrackMediaType==GF_ISOM_MEDIA_SUBT)) {
		hintType = GF_RTP_PAYT_3GPP_TEXT;
		oti = GPAC_OTI_TEXT_MPEG4;
		streamType = GF_STREAM_TEXT;
		/*fixme - this works cos there's only one PL for text in mpeg4 at the current time*/
		PL_ID = 0x10;
	} else {
		if (gf_isom_get_sample_description_count(file, TrackNum) > 1) return NULL;

		TrackMediaSubType = gf_isom_get_media_subtype(file, TrackNum, 1);
		switch (TrackMediaSubType) {
		case GF_ISOM_SUBTYPE_MPEG4_CRYP: 
			is_crypted = 1;
		case GF_ISOM_SUBTYPE_MPEG4:
			esd = gf_isom_get_esd(file, TrackNum, 1);
			hintType = GF_RTP_PAYT_MPEG4;
			if (esd) {
				streamType = esd->decoderConfig->streamType;
				oti = esd->decoderConfig->objectTypeIndication;
				if (esd->URLString) hintType = 0;
				/*AAC*/
				if ((streamType==GF_STREAM_AUDIO) && esd->decoderConfig->decoderSpecificInfo
				/*(nb: we use mpeg4 for MPEG-2 AAC)*/
				&& ((oti==GPAC_OTI_AUDIO_AAC_MPEG4) || (oti==GPAC_OTI_AUDIO_AAC_MPEG2_MP) || (oti==GPAC_OTI_AUDIO_AAC_MPEG2_LCP) || (oti==GPAC_OTI_AUDIO_AAC_MPEG2_SSRP)) ) {

					u32 sample_rate;
					GF_M4ADecSpecInfo a_cfg;
					gf_m4a_get_config(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, &a_cfg);
					nb_ch = a_cfg.nb_chan;
					sample_rate = a_cfg.base_sr;
					PL_ID = a_cfg.audioPL;
					switch (a_cfg.base_object_type) {
					case GF_M4A_AAC_MAIN:
					case GF_M4A_AAC_LC:
						if (flags & GP_RTP_PCK_USE_LATM_AAC) {
							hintType = GF_RTP_PAYT_LATM;
							break;
						}
					case GF_M4A_AAC_SBR:
					case GF_M4A_AAC_PS:
					case GF_M4A_AAC_LTP:
					case GF_M4A_AAC_SCALABLE:
					case GF_M4A_ER_AAC_LC:
					case GF_M4A_ER_AAC_LTP:
					case GF_M4A_ER_AAC_SCALABLE:
						mpeg4mode = "AAC";
						break;
					case GF_M4A_CELP:
					case GF_M4A_ER_CELP:
						mpeg4mode = "CELP";
						break;
					}
					required_rate = sample_rate;
				}
				/*MPEG1/2 audio*/
				else if ((streamType==GF_STREAM_AUDIO) && ((oti==GPAC_OTI_AUDIO_MPEG2_PART3) || (oti==GPAC_OTI_AUDIO_MPEG1))) {
					u32 sample_rate;
					if (!is_crypted) {
						GF_ISOSample *samp = gf_isom_get_sample(file, TrackNum, 1, NULL);
						u32 hdr = GF_4CC((u8)samp->data[0], (u8)samp->data[1], (u8)samp->data[2], (u8)samp->data[3]);
						nb_ch = gf_mp3_num_channels(hdr);
						sample_rate = gf_mp3_sampling_rate(hdr);
						gf_isom_sample_del(&samp);
						hintType = GF_RTP_PAYT_MPEG12_AUDIO;
						/*use official RTP/AVP payload type*/
						OfficialPayloadID = 14;
						required_rate = 90000;
					}
					/*encrypted MP3 must be sent through MPEG-4 generic to signal all ISMACryp stuff*/
					else {
						u8 bps;
						gf_isom_get_audio_info(file, TrackNum, 1, &sample_rate, &nb_ch, &bps);
						required_rate = sample_rate;
					}
				}
				/*QCELP audio*/
				else if ((streamType==GF_STREAM_AUDIO) && (oti==GPAC_OTI_AUDIO_13K_VOICE)) {
					hintType = GF_RTP_PAYT_QCELP;
					OfficialPayloadID = 12;
					required_rate = 8000;
					streamType = GF_STREAM_AUDIO;
					nb_ch = 1;
				}
				/*EVRC/SVM audio*/
				else if ((streamType==GF_STREAM_AUDIO) && ((oti==GPAC_OTI_AUDIO_EVRC_VOICE) || (oti==GPAC_OTI_AUDIO_SMV_VOICE)) ) {
					hintType = GF_RTP_PAYT_EVRC_SMV;
					required_rate = 8000;
					streamType = GF_STREAM_AUDIO;
					nb_ch = 1;
				}
				/*visual streams*/
				else if (streamType==GF_STREAM_VISUAL) {
					if (oti==GPAC_OTI_VIDEO_MPEG4_PART2) {
						GF_M4VDecSpecInfo dsi;
						gf_m4v_get_config(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, &dsi);
						PL_ID = dsi.VideoPL;
					}
					/*MPEG1/2 video*/
					if ( ((oti>=GPAC_OTI_VIDEO_MPEG2_SIMPLE) && (oti<=GPAC_OTI_VIDEO_MPEG2_422)) || (oti==GPAC_OTI_VIDEO_MPEG1)) {
						if (!is_crypted) {
							hintType = GF_RTP_PAYT_MPEG12_VIDEO;
							OfficialPayloadID = 32;
						}
					}
					/*for ISMA*/
					if (is_crypted) {
						/*that's another pain with ISMACryp, even if no B-frames the DTS is signaled...*/
						if (oti==GPAC_OTI_VIDEO_MPEG4_PART2) force_dts_delta = 22;
						else if (oti==GPAC_OTI_VIDEO_AVC) {
							flags &= ~GP_RTP_PCK_USE_MULTI;
							force_dts_delta = 22;
						}
						flags |= GP_RTP_PCK_SIGNAL_RAP | GP_RTP_PCK_SIGNAL_TS;
					}

					required_rate = default_rtp_rate;
				}
				/*systems streams*/
				else if (gf_isom_has_sync_shadows(file, TrackNum) || gf_isom_has_sample_dependency(file, TrackNum)) {
					flags |= GP_RTP_PCK_SYSTEMS_CAROUSEL;
				}
				gf_odf_desc_del((GF_Descriptor*)esd);
			}
			break;
		case GF_ISOM_SUBTYPE_3GP_H263:
			hintType = GF_RTP_PAYT_H263;
			required_rate = 90000;
			streamType = GF_STREAM_VISUAL;
			OfficialPayloadID = 34;
			/*not 100% compliant (short header is missing) but should still work*/
			oti = GPAC_OTI_VIDEO_MPEG4_PART2;
			PL_ID = 0x01;
			break;
		case GF_ISOM_SUBTYPE_3GP_AMR:
			required_rate = 8000;
			hintType = GF_RTP_PAYT_AMR;
			streamType = GF_STREAM_AUDIO;
			has_mpeg4_mapping = 0;
			nb_ch = 1;
			break;
		case GF_ISOM_SUBTYPE_3GP_AMR_WB:
			required_rate = 16000;
			hintType = GF_RTP_PAYT_AMR_WB;
			streamType = GF_STREAM_AUDIO;
			has_mpeg4_mapping = 0;
			nb_ch = 1;
			break;
		case GF_ISOM_SUBTYPE_AVC_H264:
		case GF_ISOM_SUBTYPE_AVC2_H264:
		case GF_ISOM_SUBTYPE_SVC_H264:
		{
			GF_AVCConfig *avcc = gf_isom_avc_config_get(file, TrackNum, 1);
			required_rate = 90000;	/* "90 kHz clock rate MUST be used"*/
			hintType = GF_RTP_PAYT_H264_AVC;
			streamType = GF_STREAM_VISUAL;
			avc_nalu_size = avcc->nal_unit_size;
			oti = GPAC_OTI_VIDEO_AVC;
			PL_ID = 0x0F;
			gf_odf_avc_cfg_del(avcc);
		}
			break;
		case GF_ISOM_SUBTYPE_3GP_QCELP:
			required_rate = 8000;
			hintType = GF_RTP_PAYT_QCELP;
			streamType = GF_STREAM_AUDIO;
			oti = GPAC_OTI_AUDIO_13K_VOICE;
			OfficialPayloadID = 12;
			nb_ch = 1;
			break;
		case GF_ISOM_SUBTYPE_3GP_EVRC:
		case GF_ISOM_SUBTYPE_3GP_SMV:
			required_rate = 8000;
			hintType = GF_RTP_PAYT_EVRC_SMV;
			streamType = GF_STREAM_AUDIO;
			oti = (TrackMediaSubType==GF_ISOM_SUBTYPE_3GP_EVRC) ? GPAC_OTI_AUDIO_EVRC_VOICE : GPAC_OTI_AUDIO_SMV_VOICE;
			nb_ch = 1;
			break;
		case GF_ISOM_SUBTYPE_3GP_DIMS:
			hintType = GF_RTP_PAYT_3GPP_DIMS;
			streamType = GF_STREAM_SCENE;
			break;
		case GF_ISOM_SUBTYPE_AC3:
			hintType = GF_RTP_PAYT_AC3;
			streamType = GF_STREAM_AUDIO;
			gf_isom_get_audio_info(file, TrackNum, 1, NULL, &nb_ch, NULL);
			break;
		default:
			/*ERROR*/
			hintType = 0;
			break;
		}
	}

	/*not hintable*/
	if (!hintType) return NULL;
	/*we only support self-contained files for hinting*/
	gf_isom_get_data_reference(file, TrackNum, 1, &url, &urn);
	if (url || urn) return NULL;
	
	*e = GF_OUT_OF_MEM;
	GF_SAFEALLOC(tmp, GF_RTPHinter);
	if (!tmp) return NULL;

	/*override hinter type if requested and possible*/
	if (has_mpeg4_mapping && (flags & GP_RTP_PCK_FORCE_MPEG4)) {
		hintType = GF_RTP_PAYT_MPEG4;
		avc_nalu_size = 0;
	}
	/*use static payload ID if enabled*/
	else if (OfficialPayloadID && (flags & GP_RTP_PCK_USE_STATIC_ID) ) {
		PayloadID = OfficialPayloadID;
	}

	tmp->file = file;
	tmp->TrackNum = TrackNum;
	tmp->avc_nalu_size = avc_nalu_size;
	tmp->nb_chan = nb_ch;

	/*check for composition time offsets (temporal reordering / B-frames)*/
	tmp->has_ctts = gf_isom_has_time_offset(file, TrackNum);

	/*get sample info*/
	gf_media_get_sample_average_infos(file, TrackNum, &MinSize, &MaxSize, &avgTS, &maxDTSDelta, &const_dur, &bandwidth);

	/*systems carousel: we need at least IDX and RAP signaling*/
	if (flags & GP_RTP_PCK_SYSTEMS_CAROUSEL) {
		flags |= GP_RTP_PCK_SIGNAL_RAP;
	}

	/*update flags in MultiSL*/
	if (flags & GP_RTP_PCK_USE_MULTI) {
		if (MinSize != MaxSize) flags |= GP_RTP_PCK_SIGNAL_SIZE;
		if (!const_dur) flags |= GP_RTP_PCK_SIGNAL_TS;
	}
	if (tmp->has_ctts) flags |= GP_RTP_PCK_SIGNAL_TS;

	/*default SL for RTP */
	InitSL_RTP(&my_sl);

	my_sl.timestampResolution = gf_isom_get_media_timescale(file, TrackNum);
	/*override clockrate if set*/
	if (required_rate) {
		Double sc = required_rate;
		sc /= my_sl.timestampResolution;
		maxDTSDelta = (u32) (maxDTSDelta*sc);
		my_sl.timestampResolution = required_rate;
	}
	/*switch to RTP TS*/
	max_ptime = (u32) (max_ptime * my_sl.timestampResolution / 1000);

	my_sl.AUSeqNumLength = gf_get_bit_size(gf_isom_get_sample_count(file, TrackNum));
	my_sl.CUDuration = const_dur;

	if (gf_isom_has_sync_points(file, TrackNum)) {
		my_sl.useRandomAccessPointFlag = 1;
	} else {
		my_sl.useRandomAccessPointFlag = 0;
		my_sl.hasRandomAccessUnitsOnlyFlag = 1;
	}

	if (is_crypted) {
		Bool use_sel_enc;
		gf_isom_get_ismacryp_info(file, TrackNum, 1, NULL, NULL, NULL, NULL, NULL, &use_sel_enc, &IV_length, &KI_length);
		if (use_sel_enc) flags |= GP_RTP_PCK_SELECTIVE_ENCRYPTION;
	}

	// in case a different timescale was provided
	tmp->OrigTimeScale = gf_isom_get_media_timescale(file, TrackNum);
	tmp->rtp_p = gf_rtp_builder_new(hintType, &my_sl, flags, tmp, 
								MP4T_OnNewPacket, MP4T_OnPacketDone, 
								/*if copy, no data ref*/
								copy_media ? NULL : MP4T_OnDataRef, 
								MP4T_OnData);

	//init the builder
	gf_rtp_builder_init(tmp->rtp_p, PayloadID, Path_MTU, max_ptime,
					   streamType, oti, PL_ID, MinSize, MaxSize, avgTS, maxDTSDelta, IV_length, KI_length, mpeg4mode);

	/*ISMA compliance is a pain...*/
	if (force_dts_delta) tmp->rtp_p->slMap.DTSDeltaLength = force_dts_delta;


	/*		Hint Track Setup	*/
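	/* derive a hint track ID well above the media track IDs and probe upward until an unused ID is found */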
	tmp->TrackID = gf_isom_get_track_id(file, TrackNum);
	tmp->HintID = tmp->TrackID + 65535;
	while (gf_isom_get_track_by_id(file, tmp->HintID)) tmp->HintID++;

	tmp->HintTrack = gf_isom_new_track(file, tmp->HintID, GF_ISOM_MEDIA_HINT, my_sl.timestampResolution);
	gf_isom_setup_hint_track(file, tmp->HintTrack, GF_ISOM_HINT_RTP);
	/*create a hint description*/
	gf_isom_new_hint_description(file, tmp->HintTrack, -1, -1, 0, &descIndex);
	gf_isom_rtp_set_timescale(file, tmp->HintTrack, descIndex, my_sl.timestampResolution);

	if (hintType==GF_RTP_PAYT_MPEG4) {
		tmp->rtp_p->slMap.ObjectTypeIndication = oti;
		/*set this SL for extraction.*/
		gf_isom_set_extraction_slc(file, TrackNum, 1, &my_sl);
	}
	tmp->bandwidth = bandwidth;

	/*set interleaving*/
	gf_isom_set_track_group(file, TrackNum, InterleaveGroupID);
	if (!copy_media) {
		/*if we don't copy data set hint track and media track in the same group*/
		gf_isom_set_track_group(file, tmp->HintTrack, InterleaveGroupID);
	} else {
		gf_isom_set_track_group(file, tmp->HintTrack, InterleaveGroupID + OFFSET_HINT_GROUP_ID);
	}
	/*use user-specified priority*/
	InterleaveGroupPriority*=2;
	gf_isom_set_track_priority_in_group(file, TrackNum, InterleaveGroupPriority+1);
	gf_isom_set_track_priority_in_group(file, tmp->HintTrack, InterleaveGroupPriority);

#if 0
	/*QT FF: not setting these flags = server uses a random offset*/
	gf_isom_rtp_set_time_offset(file, tmp->HintTrack, 1, 0);
	/*we don't use seq offset for maintenance purposes*/
	gf_isom_rtp_set_time_sequence_offset(file, tmp->HintTrack, 1, 0);
#endif
	*e = GF_OK;
	return tmp;
}
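The snippet below is a minimal, hedged usage sketch (not part of the original example): it hints every track of an already-open file with gf_hinter_track_new(), assuming the companion GPAC helpers gf_hinter_track_process(), gf_hinter_track_finalize() and gf_hinter_track_del() are available; the MTU, payload ID and flag values are arbitrary.

/* Hedged sketch: hint all media tracks of an already-open movie. */
static GF_Err hint_all_tracks(GF_ISOFile *file)
{
	GF_Err e = GF_OK;
	u32 i, count = gf_isom_get_track_count(file);
	for (i=0; i<count; i++) {
		/* assumption: gf_hinter_track_process / _finalize / _del exist as in GPAC media_tools */
		GF_RTPHinter *hinter = gf_hinter_track_new(file, i+1,
				1450 /*Path_MTU*/, 0 /*max_ptime*/, 0 /*default_rtp_rate -> 90000*/,
				GP_RTP_PCK_SIGNAL_RAP /*flags*/, 96 /*dynamic PayloadID*/,
				0 /*copy_media: reference media data*/, 0 /*InterleaveGroupID*/, 0 /*InterleaveGroupPriority*/, &e);
		if (!hinter) continue;	/* empty or non-hintable track: skip */

		e = gf_hinter_track_process(hinter);	/* build the hint samples */
		if (!e) e = gf_hinter_track_finalize(hinter, 1 /*AddSystemInfo*/);
		gf_hinter_track_del(hinter);
		if (e) break;
	}
	return e;
}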
Exemplo n.º 28
0
GF_EXPORT
GF_Err gf_isom_streamer_send_next_packet(GF_ISOMRTPStreamer *streamer, s32 send_ahead_delay, s32 max_sleep_time)
{
	GF_Err e = GF_OK;
	GF_RTPTrack *track, *to_send;
	u32 time, duration;
	s32 diff;
	u64 min_ts, dts, cts;

	if (!streamer) return GF_BAD_PARAM;

	/*browse all sessions and locate most mature stream*/
	to_send = NULL;
	min_ts = (u64) -1;

	time = gf_sys_clock();

	/*init session timeline - all sessions are sync'ed for packet scheduling purposes*/
	if (!streamer->timelineOrigin) {
		streamer->timelineOrigin = time*1000;
		GF_LOG(GF_LOG_INFO, GF_LOG_RTP, ("[FileStreamer] RTP session %s initialized - time origin set to %d\n", gf_isom_get_filename(streamer->isom), time));
	}

	track = streamer->stream;
	while (track) {
		/*load next AU*/
		gf_isom_set_nalu_extract_mode(streamer->isom, track->track_num, GF_ISOM_NALU_EXTRACT_LAYER_ONLY);
		if (!track->au) {
			if (track->current_au >= track->nb_aus) {
				Double scale;
				if (!streamer->loop) {
					track = track->next;
					continue;
				}
				/*increment ts offset*/
				scale = track->timescale/1000.0;
				track->ts_offset += (u32) (streamer->duration_ms * scale);
				track->microsec_ts_offset = (u32) (track->ts_offset*(1000000.0/track->timescale)) + streamer->timelineOrigin;
				track->current_au = 0;
			}

			track->au = gf_isom_get_sample(streamer->isom, track->track_num, track->current_au + 1, &track->sample_desc_index);
			track->current_au ++;
			if (track->au) {
				track->microsec_dts = (u64) (track->microsec_ts_scale * (s64) (track->au->DTS)) + track->microsec_ts_offset + streamer->timelineOrigin;
			}
		}

		/*check timing*/
		if (track->au) {
			if (min_ts > track->microsec_dts) {
				min_ts = track->microsec_dts;
				to_send = track;
			}
		}

		track = track->next;
	}

	/*no input data ...*/
	if( !to_send) return GF_EOS;


	/*we are about to send scalable base: trigger RTCP reports with the same NTP. This avoids
	NTP drift due to system clock precision which could break sync decoding*/
	if (!streamer->first_RTCP_sent || (streamer->base_track && streamer->base_track==to_send->track_num)) {
		u32 ntp_sec, ntp_frac;
		/*force sending RTCP SR every RAP ? - not really compliant but we cannot perform scalable tuning otherwise*/
		u32 ntp_type = to_send->au->IsRAP ? 2 : 1;
		gf_net_get_ntp(&ntp_sec, &ntp_frac);
		track = streamer->stream;
		while (track) {
			u32 ts = (u32) (track->au->DTS + track->au->CTS_Offset + track->ts_offset);
			gf_rtp_streamer_send_rtcp(track->rtp, GF_TRUE, ts, ntp_type, ntp_sec, ntp_frac);
			track = track->next;
		}

		streamer->first_RTCP_sent = 1;
	}

	min_ts /= 1000;

	if (max_sleep_time) {
		diff = ((u32) min_ts) - gf_sys_clock();
		if (diff>max_sleep_time)
			return GF_OK;
	}


	/*sleep until TS is mature*/
	while (1) {
		diff = ((u32) min_ts) - gf_sys_clock();

		if (diff > send_ahead_delay) {
			gf_sleep(1);
		} else {
			if (diff<10) {
				GF_LOG(GF_LOG_DEBUG, GF_LOG_RTP, ("WARNING: RTP session %s stream %d - sending packet %d ms too late\n", gf_isom_get_filename(streamer->isom), to_send->track_num, -diff));
			}
			break;
		}
	}

	/*send packets*/
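	/* DTS/CTS are expressed in the media timescale; ts_offset accumulates the loop offset when the file is replayed */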
	dts = to_send->au->DTS + to_send->ts_offset;
	cts = to_send->au->DTS + to_send->au->CTS_Offset + to_send->ts_offset;
	duration = gf_isom_get_sample_duration(streamer->isom, to_send->track_num, to_send->current_au);

	GF_LOG(GF_LOG_INFO, GF_LOG_RTP, ("[FileStreamer] Sending RTP packets for track %d AU %d/%d DTS "LLU" - CTS "LLU" - RTP TS "LLU" - size %d - RAP %d\n", to_send->track_num, to_send->current_au, to_send->nb_aus, to_send->au->DTS, to_send->au->DTS+to_send->au->CTS_Offset, cts, to_send->au->dataLength, to_send->au->IsRAP ) );

	/*unpack nal units*/
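	/* samples store length-prefixed NAL units: each NALU is preceded by a big-endian
	 * size field of avc_nalu_size bytes; every NALU is handed to the RTP packetizer
	 * separately, with au_start/au_end marking the access unit boundaries */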
	if (to_send->avc_nalu_size) {
		Bool au_start, au_end;
		u32 v, size;
		u32 remain = to_send->au->dataLength;
		char *ptr = to_send->au->data;

		au_start = 1;
		au_end = 0;
		while (remain) {
			size = 0;
			v = to_send->avc_nalu_size;
			while (v) {
				size |= (u8) *ptr;
				ptr++;
				remain--;
				v-=1;
				if (v) size<<=8;
			}
			if (remain < size) {
				GF_LOG(GF_LOG_ERROR, GF_LOG_RTP, ("[rtp hinter] Broken AVC nalu encapsulation: NALU size is %d but only %d bytes left in sample %d\n", size, remain, to_send->current_au));
				break;
			}
			remain -= size;
			au_end = remain ? 0 : 1;

			e = gf_rtp_streamer_send_data(to_send->rtp, ptr, size, to_send->au->dataLength, cts, dts, (to_send->au->IsRAP==RAP) ? 1 : 0, au_start, au_end, to_send->current_au, duration, to_send->sample_desc_index);
			ptr += size;
			au_start = 0;
		}
	} else {
		e = gf_rtp_streamer_send_data(to_send->rtp, to_send->au->data, to_send->au->dataLength, to_send->au->dataLength, cts, dts, (to_send->au->IsRAP==RAP) ? 1 : 0, 1, 1, to_send->current_au, duration, to_send->sample_desc_index);
	}
	/*delete sample*/
	gf_isom_sample_del(&to_send->au);

	return e;
}
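A hedged sketch of the expected calling pattern for this function, assuming a GF_ISOMRTPStreamer created elsewhere; the 20 ms send-ahead value is arbitrary and error handling is minimal.

/* Hedged sketch: drive an already-created streamer until all tracks are done. */
static void run_streamer(GF_ISOMRTPStreamer *streamer)
{
	while (1) {
		/* 20 ms send-ahead and blocking wait (max_sleep_time=0) are arbitrary choices */
		GF_Err e = gf_isom_streamer_send_next_packet(streamer, 20, 0);
		if (e == GF_EOS) break;	/* all tracks exhausted (and looping disabled) */
		if (e) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_RTP, ("[FileStreamer] send error: %s\n", gf_error_to_string(e)));
			break;
		}
	}
}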
Exemplo n.º 29
0
GF_EXPORT
GF_Err gf_hinter_finalize(GF_ISOFile *file, u32 IOD_Profile, u32 bandwidth)
{
	u32 i, sceneT, odT, descIndex, size, size64;
	GF_InitialObjectDescriptor *iod;
	GF_SLConfig slc;
	GF_ESD *esd;
	GF_ISOSample *samp;
	Bool remove_ocr;
	char *buffer;
	char buf64[5000], sdpLine[2300];


	gf_isom_sdp_clean(file);

	if (bandwidth) {
		sprintf(buf64, "b=AS:%d", bandwidth);
		gf_isom_sdp_add_line(file, buf64);
	}
	//extended attribute for copyright
	sprintf(buf64, "a=x-copyright: %s", "MP4/3GP File hinted with GPAC " GPAC_FULL_VERSION " (C)2000-2005 - http://gpac.sourceforge.net");
	gf_isom_sdp_add_line(file, buf64);

	if (IOD_Profile == GF_SDP_IOD_NONE) return GF_OK;

	odT = sceneT = 0;
	for (i=0; i<gf_isom_get_track_count(file); i++) {
		if (!gf_isom_is_track_in_root_od(file, i+1)) continue;
		switch (gf_isom_get_media_type(file,i+1)) {
		case GF_ISOM_MEDIA_OD:
			odT = i+1;
			break;
		case GF_ISOM_MEDIA_SCENE:
			sceneT = i+1;
			break;
		}
	}
	remove_ocr = 0;
	if (IOD_Profile == GF_SDP_IOD_ISMA_STRICT) {
		IOD_Profile = GF_SDP_IOD_ISMA;
		remove_ocr = 1;
	}

	/*if we want ISMA like iods, we need at least BIFS */
	if ( (IOD_Profile == GF_SDP_IOD_ISMA) && !sceneT ) return GF_BAD_PARAM;

	/*do NOT change PLs, we assume they are correct*/
	iod = (GF_InitialObjectDescriptor *) gf_isom_get_root_od(file);
	if (!iod) return GF_NOT_SUPPORTED;

	/*rewrite an IOD with good SL config - embed data if possible*/
	if (IOD_Profile == GF_SDP_IOD_ISMA) {
		Bool is_ok = 1;
		while (gf_list_count(iod->ESDescriptors)) {
			esd = (GF_ESD*)gf_list_get(iod->ESDescriptors, 0);
			gf_odf_desc_del((GF_Descriptor *) esd);
			gf_list_rem(iod->ESDescriptors, 0);
		}


		/*get OD esd, and embed stream data if possible*/
		if (odT) {
			esd = gf_isom_get_esd(file, odT, 1);
			if (gf_isom_get_sample_count(file, odT)==1) {
				samp = gf_isom_get_sample(file, odT, 1, &descIndex);
				if (gf_hinter_can_embbed_data(samp->data, samp->dataLength, GF_STREAM_OD)) {
					InitSL_NULL(&slc);
					slc.predefined = 0;
					slc.hasRandomAccessUnitsOnlyFlag = 1;
					slc.timeScale = slc.timestampResolution = gf_isom_get_media_timescale(file, odT);	
					slc.OCRResolution = 1000;
					slc.startCTS = samp->DTS+samp->CTS_Offset;
					slc.startDTS = samp->DTS;
					//set the SL for future extraction
					gf_isom_set_extraction_slc(file, odT, 1, &slc);

					size64 = gf_base64_encode(samp->data, samp->dataLength, buf64, 2000);
					buf64[size64] = 0;
					sprintf(sdpLine, "data:application/mpeg4-od-au;base64,%s", buf64);

					esd->decoderConfig->avgBitrate = 0;
					esd->decoderConfig->bufferSizeDB = samp->dataLength;
					esd->decoderConfig->maxBitrate = 0;
					size64 = strlen(sdpLine)+1;
					esd->URLString = (char*)gf_malloc(sizeof(char) * size64);
					strcpy(esd->URLString, sdpLine);
				} else {
					GF_LOG(GF_LOG_WARNING, GF_LOG_RTP, ("[rtp hinter] OD sample too large to be embedded in IOD - ISMA disabled\n"));
					is_ok = 0;
				}
				gf_isom_sample_del(&samp);
			}
			if (remove_ocr) esd->OCRESID = 0;
			else if (esd->OCRESID == esd->ESID) esd->OCRESID = 0;
			
			//OK, add this to our IOD
			gf_list_add(iod->ESDescriptors, esd);
		}

		esd = gf_isom_get_esd(file, sceneT, 1);
		if (gf_isom_get_sample_count(file, sceneT)==1) {
			samp = gf_isom_get_sample(file, sceneT, 1, &descIndex);
			if (gf_hinter_can_embbed_data(samp->data, samp->dataLength, GF_STREAM_SCENE)) {

				slc.timeScale = slc.timestampResolution = gf_isom_get_media_timescale(file, sceneT);	
				slc.OCRResolution = 1000;
				slc.startCTS = samp->DTS+samp->CTS_Offset;
				slc.startDTS = samp->DTS;
				//set the SL for future extraction
				gf_isom_set_extraction_slc(file, sceneT, 1, &slc);
				//encode in Base64 the sample
				size64 = gf_base64_encode(samp->data, samp->dataLength, buf64, 2000);
				buf64[size64] = 0;
				sprintf(sdpLine, "data:application/mpeg4-bifs-au;base64,%s", buf64);

				esd->decoderConfig->avgBitrate = 0;
				esd->decoderConfig->bufferSizeDB = samp->dataLength;
				esd->decoderConfig->maxBitrate = 0;
				esd->URLString = (char*)gf_malloc(sizeof(char) * (strlen(sdpLine)+1));
				strcpy(esd->URLString, sdpLine);
			} else {
				GF_LOG(GF_LOG_ERROR, GF_LOG_RTP, ("[rtp hinter] Scene description sample too large to be embedded in IOD - ISMA disabled\n"));
				is_ok = 0;
			}
			gf_isom_sample_del(&samp);
		}
		if (remove_ocr) esd->OCRESID = 0;
		else if (esd->OCRESID == esd->ESID) esd->OCRESID = 0;

		gf_list_add(iod->ESDescriptors, esd);

		if (is_ok) {
			u32 has_a, has_v, has_i_a, has_i_v;
			has_a = has_v = has_i_a = has_i_v = 0;
			for (i=0; i<gf_isom_get_track_count(file); i++) {
				esd = gf_isom_get_esd(file, i+1, 1);
				if (!esd) continue;
				if (esd->decoderConfig->streamType==GF_STREAM_VISUAL) {
					if (esd->decoderConfig->objectTypeIndication==GPAC_OTI_VIDEO_MPEG4_PART2) has_i_v ++;
					else has_v++;
				} else if (esd->decoderConfig->streamType==GF_STREAM_AUDIO) {
					if (esd->decoderConfig->objectTypeIndication==GPAC_OTI_AUDIO_AAC_MPEG4) has_i_a ++;
					else has_a++;
				}
				gf_odf_desc_del((GF_Descriptor *)esd);
			}
			/*only 1 MPEG-4 visual max and 1 MPEG-4 audio max for ISMA compliancy*/
			if (!has_v && !has_a && (has_i_v<=1) && (has_i_a<=1)) {
				sprintf(sdpLine, "a=isma-compliance:1,1.0,1");
				gf_isom_sdp_add_line(file, sdpLine);
			}
		}
	}

	//encode the IOD
	buffer = NULL;
	size = 0;
	gf_odf_desc_write((GF_Descriptor *) iod, &buffer, &size);
	gf_odf_desc_del((GF_Descriptor *)iod);

	//encode in Base64 the iod
	size64 = gf_base64_encode(buffer, size, buf64, 2000);
	buf64[size64] = 0;
	gf_free(buffer);

	sprintf(sdpLine, "a=mpeg4-iod:\"data:application/mpeg4-iod;base64,%s\"", buf64);
	gf_isom_sdp_add_line(file, sdpLine);

	return GF_OK;
}
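A short, hedged sketch (not from the original source) of the session-level step that typically follows per-track hinting: writing the SDP and ISMA IOD with gf_hinter_finalize(). The aggregated bandwidth value is assumed to be supplied by the caller, and GF_SDP_IOD_ISMA is just one possible profile.

/* Hedged sketch: after all tracks have been hinted, add the session-level SDP lines. */
static void write_session_sdp(GF_ISOFile *file, u32 total_bandwidth_kbps)
{
	/* total_bandwidth_kbps is whatever the caller accumulated for the session (SDP b=AS is in kbps) */
	GF_Err e = gf_hinter_finalize(file, GF_SDP_IOD_ISMA, total_bandwidth_kbps);
	if (e) GF_LOG(GF_LOG_ERROR, GF_LOG_RTP, ("[rtp hinter] cannot finalize session SDP: %s\n", gf_error_to_string(e)));
}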
Exemplo n.º 30
0
File: main.c Project: Bevara/GPAC
/*generates an interleaved BMP from a scene file rendered from 5 different viewpoints*/
void bifs3d_viewpoints_merger(GF_ISOFile *file, char *szConfigFile, u32 width, u32 height, char *rad_name, u32 dump_type, char *out_dir, Double fps, s32 frameID, s32 dump_time)
{
	GF_User user;
	char out_path[GF_MAX_PATH];
	char old_driv[1024];
	BIFSVID b2v;
	Bool needs_raw = 0;
	GF_Err e;
	GF_VideoSurface fb;
	unsigned char **rendered_frames;
	u32 nb_viewpoints = 5;
	u32 viewpoint_index;


	/* Configuration of the Rendering Capabilities */
	{
		const char *test;
		memset(&user, 0, sizeof(GF_User));
		user.config = gf_cfg_init(szConfigFile, NULL);

		if (!user.config) {
			fprintf(stdout, "Error: Configuration File \"%s\" not found\n", szConfigFile ? szConfigFile : GPAC_CFG_FILE);
			return;
		}

		test = gf_cfg_get_key(user.config, "General", "ModulesDirectory");
		user.modules = gf_modules_new((const unsigned char *) test, user.config);
		strcpy(old_driv, "raw_out");
		if (!gf_modules_get_count(user.modules)) {
			printf("Error: no modules found\n");
			goto err_exit;
		}

		/*switch driver to raw_driver*/
		test = gf_cfg_get_key(user.config, "Video", "DriverName");
		if (test) strcpy(old_driv, test);

		needs_raw = 0;
		test = gf_cfg_get_key(user.config, "Compositor", "RendererName");
		/*since we only support RGB24 for MP42AVI force using RAW out with 2D driver*/
		if (test && strstr(test, "2D")) {
			gf_cfg_set_key(user.config, "Video", "DriverName", "Raw Video Output");
			needs_raw = 1;
		}
		if (needs_raw) {
			test = gf_cfg_get_key(user.config, "Video", "DriverName");
			if (stricmp(test, "raw_out") && stricmp(test, "Raw Video Output")) {
				printf("couldn't load raw output driver (%s used)\n", test);
				goto err_exit;
			}
		}
	}

	memset(&b2v, 0, sizeof(BIFSVID));
	user.init_flags = GF_TERM_NO_AUDIO;
	/* Initialization of the compositor */
	b2v.sr = gf_sc_new(&user, 0, NULL);
	gf_sc_set_option(b2v.sr, GF_OPT_VISIBLE, 0);

	/* Initialization of the scene graph */
	b2v.sg = gf_sg_new();
	gf_sg_set_scene_time_callback(b2v.sg, get_scene_time, &b2v);
	gf_sg_set_init_callback(b2v.sg, node_init, &b2v);
	gf_sg_set_modified_callback(b2v.sg, node_modif, &b2v);

	/*load config*/
	gf_sc_set_option(b2v.sr, GF_OPT_RELOAD_CONFIG, 1);

	{
		u32 di;
		u32 track_number;
		GF_ESD *esd;
		u16 es_id;
		b2v.bifs = gf_bifs_decoder_new(b2v.sg, 0);

		for (track_number=0; track_number<gf_isom_get_track_count(file); track_number++) {
			esd = gf_isom_get_esd(file, track_number+1, 1);
			if (!esd) continue;
			if (!esd->dependsOnESID && (esd->decoderConfig->streamType == GF_STREAM_SCENE)) break;
			gf_odf_desc_del((GF_Descriptor *) esd);
			esd = NULL;
		}
		if (!esd) {
			printf("no bifs track found\n");
			goto err_exit;
		}

		es_id = (u16) gf_isom_get_track_id(file, track_number+1);
		e = gf_bifs_decoder_configure_stream(b2v.bifs, es_id, esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, esd->decoderConfig->objectTypeIndication);
		if (e) {
			printf("BIFS init error %s\n", gf_error_to_string(e));
			gf_odf_desc_del((GF_Descriptor *) esd);
			esd = NULL;
			goto err_exit;
		}

		{
			GF_ISOSample *samp = gf_isom_get_sample(file, track_number+1, 1, &di);
			b2v.cts = samp->DTS + samp->CTS_Offset;
			/*apply command*/
			gf_bifs_decode_au(b2v.bifs, es_id, samp->data, samp->dataLength, ((Double)(s64)b2v.cts)/1000.0);
			gf_isom_sample_del(&samp);
		}

		b2v.duration = gf_isom_get_media_duration(file, track_number+1);

		gf_odf_desc_del((GF_Descriptor *) esd);

	}
	gf_sc_set_scene(b2v.sr, b2v.sg);

	if (!width || !height) {
		gf_sg_get_scene_size_info(b2v.sg, &width, &height);
	}
	/*we work in RGB24, and we must make sure the pitch is %4*/
	if ((width*3)%4) {
		printf("Adjusting width (%d) to have a stride multiple of 4\n", width);
		while ((width*3)%4) width--;
	}
	gf_sc_set_size(b2v.sr, width, height);
	gf_sc_get_screen_buffer(b2v.sr, &fb);
	width = fb.width;
	height = fb.height;
	gf_sc_release_screen_buffer(b2v.sr, &fb);

	GF_SAFEALLOC(rendered_frames, nb_viewpoints*sizeof(char *));
	for (viewpoint_index = 1; viewpoint_index <= nb_viewpoints; viewpoint_index++) {
		GF_SAFEALLOC(rendered_frames[viewpoint_index-1], fb.width*fb.height*3);
		gf_sc_set_viewpoint(b2v.sr, viewpoint_index, NULL);
		gf_sc_draw_frame(b2v.sr);
		/*needed for background2D !!*/
		gf_sc_draw_frame(b2v.sr);
		strcpy(out_path, "");
		if (out_dir) {
			strcat(out_path, out_dir);
			if (out_path[strlen(out_path)-1] != '\\') strcat(out_path, "\\");
		}
		strcat(out_path, rad_name);
		strcat(out_path, "_view");
		gf_sc_get_screen_buffer(b2v.sr, &fb);
		write_bmp(&fb, out_path, viewpoint_index);
		memcpy(rendered_frames[viewpoint_index-1], fb.video_buffer, fb.width*fb.height*3);
		gf_sc_release_screen_buffer(b2v.sr, &fb);
	}

	if (width != 800 || height != 480) {
		printf("Wrong scene dimension, cannot produce output\n");
		goto err_exit;
	} else {
		u32 x, y;
		GF_VideoSurface out_fb;
		u32 bpp = 3;
		out_fb.width = 800;
		out_fb.height = 480;
		out_fb.pitch = 800*bpp;
		out_fb.pixel_format = GF_PIXEL_RGB_24;
		out_fb.is_hardware_memory = 0;
		GF_SAFEALLOC(out_fb.video_buffer, out_fb.pitch*out_fb.height);
#if 1
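		/* interleave the 5 views sub-pixel by sub-pixel: each R/G/B component of an
		 * output pixel is taken from a different rendered viewpoint, with the view
		 * index shifted by one on every line (apparently for a 5-view autostereoscopic display) */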
		for (y=0; y<out_fb.height; y++) {
			/*starting red pixel is R1, R5, R4, R3, R2, R1, R5, ... when increasing line num*/
			u32 line_shift = (5-y) % 5;
			for (x=0; x<out_fb.width; x++) {
				u32 view_shift = (line_shift+bpp*x)%5;
				u32 offset = out_fb.pitch*y + x*bpp;
				/* red */
				out_fb.video_buffer[offset] = rendered_frames[view_shift][offset];
				/* green */
				out_fb.video_buffer[offset+1] = rendered_frames[(view_shift+1)%5][offset+1];
				/* blue */
				out_fb.video_buffer[offset+2] = rendered_frames[(view_shift+2)%5][offset+2];
			}
		}
#else
		/*calibration*/
		for (y=0; y<out_fb.height; y++) {
			u32 line_shift = (5- y%5) % 5;
			for (x=0; x<out_fb.width; x++) {
				u32 view_shift = (line_shift+bpp*x)%5;
				u32 offset = out_fb.pitch*y + x*bpp;
				out_fb.video_buffer[offset] = ((view_shift)%5 == 2) ? 0xFF : 0;
				out_fb.video_buffer[offset+1] = ((view_shift+1)%5 == 2) ? 0xFF : 0;
				out_fb.video_buffer[offset+2] = ((view_shift+2)%5 == 2) ? 0xFF : 0;
			}
		}
#endif
		write_bmp(&out_fb, "output", 0);
	}

	/*destroy everything*/
	gf_bifs_decoder_del(b2v.bifs);
	gf_sg_del(b2v.sg);
	gf_sc_set_scene(b2v.sr, NULL);
	gf_sc_del(b2v.sr);



err_exit:
	/*	if (rendered_frames) {
			for (viewpoint_index = 1; viewpoint_index <= nb_viewpoints; viewpoint_index++) {
				if (rendered_frames[viewpoint_index-1]) gf_free(rendered_frames[viewpoint_index-1]);
			}
			gf_free(rendered_frames);
		}
		if (output_merged_frame) gf_free(output_merged_frame);
	*/
	if (user.modules) gf_modules_del(user.modules);
	if (needs_raw) gf_cfg_set_key(user.config, "Video", "DriverName", old_driv);
	gf_cfg_del(user.config);
}
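A hedged usage sketch for the function above, assuming the movie is opened read-only with gf_isom_open(); the configuration file name, dump_type value and fps are illustrative assumptions, and the 800x480 size matches the check performed inside the function.

/* Hedged sketch: open a movie read-only and produce the 5-view interleaved output. */
int dump_viewpoints(const char *mp4_path)
{
	/* "GPAC.cfg", dump_type=0, fps=25 and the output names are assumptions */
	GF_ISOFile *mp4 = gf_isom_open(mp4_path, GF_ISOM_OPEN_READ, NULL);
	if (!mp4) return 1;
	bifs3d_viewpoints_merger(mp4, "GPAC.cfg", 800, 480, "scene", 0, ".", 25.0, -1, -1);
	gf_isom_close(mp4);
	return 0;
}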