Example #1
static char* PrintCntlInfo(
	MP4FileHandle mp4File,
	MP4TrackId trackId)
{
  const char *media_data_name = MP4GetTrackMediaDataName(mp4File, trackId);
  const char *typeName = "Unknown";

  if (media_data_name == NULL) {
    typeName = "Unknown - no media data name";
  } else if (strcasecmp(media_data_name, "href") == 0) {
    typeName = "ISMA Href";
  } else {
    typeName = media_data_name;
  }

  MP4Duration trackDuration = 
    MP4GetTrackDuration(mp4File, trackId);
 
  double msDuration = 
    UINT64_TO_DOUBLE(MP4ConvertFromTrackDuration(mp4File, trackId, 
						 trackDuration, MP4_MSECS_TIME_SCALE));
  char *sInfo = (char *)MP4Malloc(256);

  snprintf(sInfo, 256,
	   "%u\tcontrol\t%s, %.3f secs\n",
	   trackId, 
	   typeName,
	   msDuration / 1000.0);
  return sInfo;
}
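
Example #1 above and most of the examples that follow share the same core pattern: MP4GetTrackDuration() returns the duration in units of the track's own time scale, and MP4ConvertFromTrackDuration() rescales it to a wall-clock unit such as milliseconds. A minimal sketch of just that pattern, assuming a valid mp4File handle and trackId; the helper name is illustrative and not taken from any of the projects shown here:

static double GetTrackDurationSeconds(MP4FileHandle mp4File, MP4TrackId trackId)
{
  /* duration expressed in units of the track's own time scale */
  MP4Duration trackDuration = MP4GetTrackDuration(mp4File, trackId);

  /* rescale to milliseconds, then to seconds */
  uint64_t msDuration = MP4ConvertFromTrackDuration(mp4File, trackId,
                                                    trackDuration, MP4_MSECS_TIME_SCALE);
  return UINT64_TO_DOUBLE(msDuration) / 1000.0;
}
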
Example #2
/** Action for setting chapters every n seconds in <b>job.file</b>
 *
 *  
 *  @param job the job to process
 *  @return mp4v2::util::SUCCESS if successful, mp4v2::util::FAILURE otherwise
 */
bool
ChapterUtility::actionEvery( JobContext& job )
{
    ostringstream oss;
    oss << "Setting " << getChapterTypeName( _ChapterType ) << " chapters every "
        << _ChaptersEvery << " seconds in file " << '"' << job.file << '"' << endl;

    verbose1f( "%s", oss.str().c_str() );
    if( dryrunAbort() )
    {
        return SUCCESS;
    }

    job.fileHandle = MP4Modify( job.file.c_str() );
    if( job.fileHandle == MP4_INVALID_FILE_HANDLE )
    {
        return herrf( "unable to open for write: %s\n", job.file.c_str() );
    }

    bool isVideoTrack = false;
    MP4TrackId refTrackId = getReferencingTrack( job.fileHandle, isVideoTrack );
    if( !MP4_IS_VALID_TRACK_ID(refTrackId) )
    {
        return herrf( "unable to find a video or audio track in file %s\n", job.file.c_str() );
    }

    Timecode refTrackDuration( MP4GetTrackDuration( job.fileHandle, refTrackId ), MP4GetTrackTimeScale( job.fileHandle, refTrackId ) );
    refTrackDuration.setScale( CHAPTERTIMESCALE );

    Timecode chapterDuration( _ChaptersEvery * 1000, CHAPTERTIMESCALE );
    chapterDuration.setFormat( Timecode::DECIMAL );
    vector<MP4Chapter_t> chapters;

    do
    {
        MP4Chapter_t chap;
        chap.duration = refTrackDuration.duration > chapterDuration.duration ? chapterDuration.duration : refTrackDuration.duration;
        sprintf(chap.title, "Chapter %lu", (unsigned long)chapters.size()+1);

        chapters.push_back( chap );
        refTrackDuration -= chapterDuration;
    }
    while( refTrackDuration.duration > 0 );

    if( 0 < chapters.size() )
    {
        MP4SetChapters(job.fileHandle, &chapters[0], (uint32_t)chapters.size(), _ChapterType);
    }

    fixQtScale( job.fileHandle );
    job.optimizeApplicable = true;

    return SUCCESS;
}
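
The do/while loop above emits one chapter per _ChaptersEvery-second step until the reference track's duration is used up (and always at least one chapter, since it is a do/while). A hedged restatement of that arithmetic as plain integer math; the helper name is illustrative and not part of mp4v2:

static uint32_t chaptersForDuration( uint64_t durationMs, uint32_t everySecs )
{
    // one chapter per everySecs-second step, rounded up
    // (matches the loop above for any non-zero duration)
    const uint64_t stepMs = static_cast<uint64_t>( everySecs ) * 1000;
    if( 0 == stepMs )
        return 0;
    return static_cast<uint32_t>( ( durationMs + stepMs - 1 ) / stepMs );
}
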
Example #3
    MP4Reader(const std::string &file_path)
            : time_scale(9 * MP4_MSECS_TIME_SCALE)
            , file_path(file_path)
            , handle(MP4_INVALID_FILE_HANDLE)
            , video_track_id(MP4_INVALID_TRACK_ID)
            , next_video_sample_idx(1)
            , video_sample(nullptr)
            , video_timescale(0)
            , video_sample_max_size(0)
            , video_sample_number(0)
            , video_duration(0)
            , pSeqHeaders(nullptr)
            , pSeqHeaderSize(nullptr)
            , pPictHeaders(nullptr)
            , pPictHeaderSize(nullptr)
    {
        handle = MP4Read(this->file_path.c_str());

        video_track_id = MP4FindTrackId(handle, 0, MP4_VIDEO_TRACK_TYPE);
        if (video_track_id != MP4_INVALID_TRACK_ID) {
            video_timescale = MP4GetTrackTimeScale(handle, video_track_id);
            video_sample_max_size = MP4GetTrackMaxSampleSize(handle, video_track_id) * 2;
            video_duration = MP4GetTrackDuration(handle, video_track_id);
            video_sample = new unsigned char[video_sample_max_size];
            video_sample_number = MP4GetTrackNumberOfSamples(handle, video_track_id);

            if (MP4GetTrackH264SeqPictHeaders(handle,
                                              video_track_id,
                                              &pSeqHeaders,
                                              &pSeqHeaderSize,
                                              &pPictHeaders,
                                              &pPictHeaderSize))
            {
                printf("Get SPS(%d) and PPS(%d)\n", *pSeqHeaderSize, *pPictHeaderSize);

                for(int i = 0; (pSeqHeaders[i] && pSeqHeaderSize[i]); i++) {
                    printf("SPS(%d): %02x %02x %02x %02x %02x\n", i,
                           pSeqHeaders[i][0], pSeqHeaders[i][1], pSeqHeaders[i][2],
                           pSeqHeaders[i][3], pSeqHeaders[i][4]);
                }
                for(int i = 0; (pPictHeaders[i] && pPictHeaderSize[i]); i++) {
                    printf("PPS(%d): %02x %02x %02x %02x %02x\n", i,
                           pPictHeaders[i][0], pPictHeaders[i][1], pPictHeaders[i][2],
                           pPictHeaders[i][3], pPictHeaders[i][4]);
                }
            }
        }
    }
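
MP4Reader keeps the raw video_duration and video_timescale members instead of converting them up front; divided, they give the track length in seconds. A small illustrative helper (not part of the class above) showing that arithmetic:

// Illustrative only: seconds = duration in track time-scale units / time scale.
static double track_duration_seconds(MP4Duration duration, uint32_t timescale)
{
    return timescale ? static_cast<double>(duration) / timescale : 0.0;
}
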
Example #4
/**************************************************************************
 * Quicktime stream base class functions
 **************************************************************************/
CMp4ByteStream::CMp4ByteStream (CMp4File *parent,
				MP4TrackId track,
				const char *type,
				bool has_video)
  : COurInByteStream(type)
{
#ifdef ISMACRYP_DEBUG
  my_enc_file = fopen("encbuffer.raw", "w");
  my_unenc_file = fopen("unencbuffer.raw", "w");
  my_unenc_file2 = fopen("unencbuffer2.raw", "w");
#endif
#ifdef OUTPUT_TO_FILE
  char buffer[80];
  strcpy(buffer, type);
  strcat(buffer, ".raw");
  m_output_file = fopen(buffer, "w");
#endif
  m_track = track;
  m_frame_on = 1;
  m_parent = parent;
  m_eof = false;
  MP4FileHandle fh = parent->get_file();
  m_frames_max = MP4GetTrackNumberOfSamples(fh, m_track);
  mp4f_message(LOG_DEBUG, "%s - %u samples", type, m_frames_max);
  m_max_frame_size = MP4GetTrackMaxSampleSize(fh, m_track) + 4; 
  m_sample_freq = MP4GetTrackTimeScale(fh, m_track);
  m_buffer = (u_int8_t *) malloc(m_max_frame_size * sizeof(u_int8_t));
  m_has_video = has_video;
  m_frame_in_buffer = 0xffffffff;
  MP4Duration trackDuration;
  trackDuration = MP4GetTrackDuration(fh, m_track);
  uint64_t max_ts;
  max_ts = MP4ConvertFromTrackDuration(fh, 
				       m_track, 
				       trackDuration,
				       MP4_MSECS_TIME_SCALE);
  m_max_time = UINT64_TO_DOUBLE(max_ts);
  m_max_time /= 1000.0;
  mp4f_message(LOG_DEBUG, 
	       "MP4 %s max time is "U64" %g", type, max_ts, m_max_time);
  read_frame(1, NULL);
}
Example #5
/** Action for importing chapters into the <b>job.file</b>
 *
 *  
 *  @param job the job to process
 *  @return mp4v2::util::SUCCESS if successful, mp4v2::util::FAILURE otherwise
 */
bool
ChapterUtility::actionImport( JobContext& job )
{
    vector<MP4Chapter_t> chapters;
    Timecode::Format format;

    // create the chapter file name
    string inName = job.file;
    if( _ChapterFile.empty() )
    {
        FileSystem::pathnameStripExtension( inName );
        inName.append( ".chapters.txt" );
    }
    else
    {
        inName = _ChapterFile;
    }

    if( parseChapterFile( inName, chapters, format ) )
    {
        return FAILURE;
    }

    ostringstream oss;
    oss << "Importing " << chapters.size() << " " << getChapterTypeName( _ChapterType );
    oss << " chapters from file " << inName << " into file " << '"' << job.file << '"' << endl;

    verbose1f( "%s", oss.str().c_str() );
    if( dryrunAbort() )
    {
        return SUCCESS;
    }

    if( 0 == chapters.size() )
    {
        return herrf( "No chapters found in file %s\n", inName.c_str() );
    }

    job.fileHandle = MP4Modify( job.file.c_str() );
    if( job.fileHandle == MP4_INVALID_FILE_HANDLE )
    {
        return herrf( "unable to open for write: %s\n", job.file.c_str() );
    }

    bool isVideoTrack = false;
    MP4TrackId refTrackId = getReferencingTrack( job.fileHandle, isVideoTrack );
    if( !MP4_IS_VALID_TRACK_ID(refTrackId) )
    {
        return herrf( "unable to find a video or audio track in file %s\n", job.file.c_str() );
    }
    if( Timecode::FRAME == format && !isVideoTrack )
    {
        // we need a video track for this
        return herrf( "unable to find a video track in file %s but chapter file contains frame timestamps\n", job.file.c_str() );
    }

    // get duration and recalculate scale
    Timecode refTrackDuration( MP4GetTrackDuration( job.fileHandle, refTrackId ),
                               MP4GetTrackTimeScale( job.fileHandle, refTrackId ) );
    refTrackDuration.setScale( CHAPTERTIMESCALE );

    // check for chapters starting after duration of reftrack
    for( vector<MP4Chapter_t>::iterator it = chapters.begin(); it != chapters.end(); )
    {
        Timecode curr( (*it).duration, CHAPTERTIMESCALE );
        if( refTrackDuration <= curr )
        {
            hwarnf( "Chapter '%s' start: %s, playlength of file: %s, chapter cannot be set\n",
                    (*it).title, curr.svalue.c_str(), refTrackDuration.svalue.c_str() );
            it = chapters.erase( it );
        }
        else
        {
            ++it;
        }
    }
    if( 0 == chapters.size() )
    {
        return SUCCESS;
    }

    // convert start time into duration
    uint32_t framerate = static_cast<uint32_t>( CHAPTERTIMESCALE );
    if( Timecode::FRAME == format )
    {
        // get the framerate
        MP4SampleId sampleCount = MP4GetTrackNumberOfSamples( job.fileHandle, refTrackId );
        Timecode tmpcd( refTrackDuration.svalue, CHAPTERTIMESCALE );
        framerate = static_cast<uint32_t>( std::ceil( ((double)sampleCount / (double)tmpcd.duration) * CHAPTERTIMESCALE ) );
    }

    for( vector<MP4Chapter_t>::iterator it = chapters.begin(); it != chapters.end(); ++it )
    {
        MP4Duration currDur = (*it).duration;
        MP4Duration nextDur =  chapters.end() == it+1 ? refTrackDuration.duration : (*(it+1)).duration;

        if( Timecode::FRAME == format )
        {
            // convert from frame nr to milliseconds
            currDur = convertFrameToMillis( (*it).duration, framerate );

            if( chapters.end() != it+1 )
            {
                nextDur = convertFrameToMillis( (*(it+1)).duration, framerate );
            }
        }

        (*it).duration = nextDur - currDur;
    }

    // now set the chapters
    MP4SetChapters( job.fileHandle, &chapters[0], (uint32_t)chapters.size(), _ChapterType );

    fixQtScale( job.fileHandle );
    job.optimizeApplicable = true;

    return SUCCESS;
}
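
convertFrameToMillis() is a ChapterUtility helper that is not shown above. Given that the framerate computed from the sample count is effectively frames per second, the conversion it performs would presumably look like the sketch below (illustrative only, not the actual mp4v2 implementation):

static MP4Duration frameToMillis( MP4Duration frameNumber, uint32_t framerate )
{
    // frame index -> milliseconds at roughly framerate frames per second
    return framerate ? ( frameNumber * 1000 ) / framerate : 0;
}
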
Example #6
int AacPcm::getInfos(MediaInfo *infos)
{
	if(!infos)
		return 1;

	if(hDecoder)
	{
		SHOW_INFO()
	    return 0;
	}

	IsAAC=strcmpi(infos->getFilename()+lstrlen(infos->getFilename())-4,".aac")==0;

	if(!IsAAC) // MP4 file ---------------------------------------------------------------------
	{
	MP4Duration			length;
	unsigned __int32	buffer_size;
    mp4AudioSpecificConfig mp4ASC;

		if(!(mp4File=MP4Read(infos->getFilename(), 0)))
			ERROR_getInfos("Error opening file");

		if((track=GetAACTrack(mp4File))<0)
			ERROR_getInfos(0); //"Unable to find correct AAC sound track");

		if(!(hDecoder=faacDecOpen()))
			ERROR_getInfos("Error initializing decoder library");

		MP4GetTrackESConfiguration(mp4File, track, (unsigned __int8 **)&buffer, &buffer_size);
		if(!buffer)
			ERROR_getInfos("MP4GetTrackESConfiguration");
		AudioSpecificConfig(buffer, buffer_size, &mp4ASC);
        Channels=mp4ASC.channelsConfiguration;

		if(faacDecInit2(hDecoder, buffer, buffer_size, &Samplerate, &Channels) < 0)
			ERROR_getInfos("Error initializing decoder library");
		FREE_ARRAY(buffer);

		length=MP4GetTrackDuration(mp4File, track);
		len_ms=(DWORD)MP4ConvertFromTrackDuration(mp4File, track, length, MP4_MSECS_TIME_SCALE);
		file_info.bitrate=MP4GetTrackBitRate(mp4File, track);
		file_info.version=MP4GetTrackAudioType(mp4File, track)==MP4_MPEG4_AUDIO_TYPE ? 4 : 2;
		numSamples=MP4GetTrackNumberOfSamples(mp4File, track);
		sampleId=1;
	}
	else // AAC file ------------------------------------------------------------------------------
	{   
	DWORD			read,
					tmp;
	BYTE			Channels4Raw=0;

		if(!(aacFile=fopen(infos->getFilename(),"rb")))
			ERROR_getInfos("Error opening file"); 

		// use bufferized stream
		setvbuf(aacFile,NULL,_IOFBF,32767);

		// get size of file
		fseek(aacFile, 0, SEEK_END);
		src_size=ftell(aacFile);
		fseek(aacFile, 0, SEEK_SET);

		if(!(buffer=(BYTE *)malloc(FAAD_STREAMSIZE)))
			ERROR_getInfos("Memory allocation error: buffer")

		tmp=src_size<FAAD_STREAMSIZE ? src_size : FAAD_STREAMSIZE;
		read=fread(buffer, 1, tmp, aacFile);
		if(read==tmp)
		{
			bytes_read=read;
			bytes_into_buffer=read;
		}
		else
			ERROR_getInfos("Read failed!")

		if(tagsize=id3v2_tag(buffer))
		{
			if(tagsize>(long)src_size)
				ERROR_getInfos("Corrupt stream!");
			if(tagsize<bytes_into_buffer)
			{
				bytes_into_buffer-=tagsize;
				memcpy(buffer,buffer+tagsize,bytes_into_buffer);
			}
			else
			{
				bytes_read=tagsize;
				bytes_into_buffer=0;
				if(tagsize>bytes_into_buffer)
					fseek(aacFile, tagsize, SEEK_SET);
			}
			if(src_size<bytes_read+FAAD_STREAMSIZE-bytes_into_buffer)
				tmp=src_size-bytes_read;
			else
				tmp=FAAD_STREAMSIZE-bytes_into_buffer;
			read=fread(buffer+bytes_into_buffer, 1, tmp, aacFile);
			if(read==tmp)
			{
				bytes_read+=read;
				bytes_into_buffer+=read;
			}
			else
				ERROR_getInfos("Read failed!");
		}

		if(get_AAC_format((char *)infos->getFilename(), &file_info, &seek_table, &seek_table_length, 0))
			ERROR_getInfos("get_AAC_format");
		IsSeekable=file_info.headertype==ADTS && seek_table && seek_table_length>0;
		BlockSeeking=!IsSeekable;

		if(!(hDecoder=faacDecOpen()))
			ERROR_getInfos("Can't open library");

		if(file_info.headertype==RAW)
		{
		faacDecConfiguration	config;

			config.defSampleRate=atoi(cfg_samplerate);
			switch(cfg_profile[1])
			{
			case 'a':
				config.defObjectType=MAIN;
				break;
			case 'o':
				config.defObjectType=LOW;
				break;
			case 'S':
				config.defObjectType=SSR;
				break;
			case 'T':
				config.defObjectType=LTP;
				break;
			}
			switch(cfg_bps[0])
			{
			case '1':
				config.outputFormat=FAAD_FMT_16BIT;
				break;
			case '2':
				config.outputFormat=FAAD_FMT_24BIT;
				break;
			case '3':
				config.outputFormat=FAAD_FMT_32BIT;
				break;
			case 'F':
				config.outputFormat=FAAD_FMT_24BIT;
				break;
			}
			faacDecSetConfiguration(hDecoder, &config);

			if(!FindBitrate)
			{
			AacPcm *NewInst;
				if(!(NewInst=new AacPcm()))
					ERROR_getInfos("Memory allocation error: NewInst");

				NewInst->FindBitrate=TRUE;
				if(NewInst->getInfos(infos))
					ERROR_getInfos(0);
				Channels4Raw=NewInst->frameInfo.channels;
				file_info.bitrate=NewInst->file_info.bitrate*Channels4Raw;
				delete NewInst;
			}
			else
			{
			DWORD	Samples,
					BytesConsumed;

				if((bytes_consumed=faacDecInit(hDecoder,buffer,bytes_into_buffer,&Samplerate,&Channels))<0)
					ERROR_getInfos("Can't init library");
				bytes_into_buffer-=bytes_consumed;
				if(!processData(infos,0,0))
					ERROR_getInfos(0);
				Samples=frameInfo.samples/sizeof(short);
				BytesConsumed=frameInfo.bytesconsumed;
				processData(infos,0,0);
				if(BytesConsumed<frameInfo.bytesconsumed)
					BytesConsumed=frameInfo.bytesconsumed;
				file_info.bitrate=(BytesConsumed*8*Samplerate)/Samples;
				if(!file_info.bitrate)
					file_info.bitrate=1000; // try to continue decoding
				return 0;
			}
		}

		if((bytes_consumed=faacDecInit(hDecoder, buffer, bytes_into_buffer, &Samplerate, &Channels))<0)
			ERROR_getInfos("faacDecInit failed!")
		bytes_into_buffer-=bytes_consumed;

		if(Channels4Raw)
			Channels=Channels4Raw;

		len_ms=(DWORD)((1000*((float)src_size*8))/file_info.bitrate);
	}

	SHOW_INFO();
    return 0;
}
Example #7
File: aac.c Project: vadmium/deadbeef
// returns -1 for error, 0 for mp4, 1 for aac
int
aac_probe (DB_FILE *fp, const char *fname, MP4FILE_CB *cb, float *duration, int *samplerate, int *channels, int *totalsamples, int *mp4track, MP4FILE *pmp4) {
    // try mp4
    trace ("aac_probe: pos=%lld, junk=%d\n", deadbeef->ftell (fp), ((aac_info_t*)cb->user_data)->junk);

    if (mp4track) {
        *mp4track = -1;
    }
    if (*pmp4) {
        *pmp4 = NULL;
    }
    *duration = -1;
#ifdef USE_MP4FF
    trace ("mp4ff_open_read\n");
    mp4ff_t *mp4 = mp4ff_open_read (cb);
#else
    MP4FileHandle mp4 = MP4ReadProvider (fname, 0, cb);
#endif
    if (!mp4) {
        trace ("not an mp4 file\n");
        return -1;
    }
    if (pmp4) {
        *pmp4 = mp4;
    }
#ifdef USE_MP4FF
    int ntracks = mp4ff_total_tracks (mp4);
    if (ntracks > 0) {
        trace ("m4a container detected, ntracks=%d\n", ntracks);
        int i = -1;
        trace ("looking for mp4 data...\n");
        int sr = -1;
        unsigned char*  buff = 0;
        unsigned int    buff_size = 0;
        for (i = 0; i < ntracks; i++) {
            mp4AudioSpecificConfig mp4ASC;
            mp4ff_get_decoder_config(mp4, i, &buff, &buff_size);
            if (buff) {
                int rc = AudioSpecificConfig(buff, buff_size, &mp4ASC);
                sr = mp4ASC.samplingFrequency;
                if(rc < 0) {
                    free (buff);
                    buff = 0;
                    continue;
                }
                break;
            }
        }
        if (i != ntracks && buff) 
        {
            trace ("found audio track (%d)\n", i);
            // init mp4 decoding
            NeAACDecHandle dec = NeAACDecOpen ();
            unsigned long srate;
            unsigned char ch;
            if (NeAACDecInit2(dec, buff, buff_size, &srate, &ch) < 0) {
                trace ("NeAACDecInit2 returned error\n");
                goto error;
            }
            *samplerate = srate;
            *channels = ch;
            int samples = mp4ff_num_samples(mp4, i);
            samples = (int64_t)samples * srate / mp4ff_time_scale (mp4, i);
            int tsamples = samples;
            NeAACDecConfigurationPtr conf = NeAACDecGetCurrentConfiguration (dec);
            conf->dontUpSampleImplicitSBR = 1;
            NeAACDecSetConfiguration (dec, conf);
            mp4AudioSpecificConfig mp4ASC;
            int mp4framesize;
            if (NeAACDecAudioSpecificConfig(buff, buff_size, &mp4ASC) >= 0)
            {
                mp4framesize = 1024;
                if (mp4ASC.frameLengthFlag == 1) {
                    mp4framesize = 960;
                }
                // commented this out, since it fixes double-duration bug on
                // some mp4 files
                //if (mp4ASC.sbr_present_flag == 1) {
                //    mp4framesize *= 2;
                //}
            }
            else {
                trace ("NeAACDecAudioSpecificConfig failed, can't get mp4framesize\n");
                goto error;
            }
            tsamples *= mp4framesize;

            trace ("mp4 nsamples=%d, samplerate=%d, timescale=%d, duration=%lld\n", samples, *samplerate, mp4ff_time_scale(mp4, i), mp4ff_get_track_duration(mp4, i));
            *duration = (float)tsamples / (*samplerate);
            trace ("mp4 duration: %f (tsamples %d/samplerate %d)\n", *duration, tsamples, *samplerate);
            
            NeAACDecClose (dec);

            if (totalsamples) {
                *totalsamples = tsamples;
            }
            if (mp4track) {
                *mp4track = i;
            }
            if (!*pmp4) {
                mp4ff_close (mp4);
            }
            return 0;
error:
            NeAACDecClose (dec);
            free (buff);
            if (!*pmp4) {
                mp4ff_close (mp4);
            }
            return -1;
        }
        else {
            trace ("audio track not found\n");
            mp4ff_close (mp4);
            mp4 = NULL;
        }
        if (buff) {
            free (buff);
            buff = NULL;
        }

    }
#else
    MP4FileHandle mp4File = mp4;
    MP4TrackId trackId = MP4FindTrackId(mp4File, 0, "audio", 0);
    trace ("trackid: %d\n", trackId);
    uint32_t timeScale = MP4GetTrackTimeScale(mp4File, trackId);
    MP4Duration trackDuration = MP4GetTrackDuration(mp4File, trackId);
    MP4SampleId numSamples = MP4GetTrackNumberOfSamples(mp4File, trackId);
    u_int8_t* pConfig;
    uint32_t configSize = 0;
    bool res = MP4GetTrackESConfiguration(mp4File, trackId, &pConfig, &configSize);
    if (res && pConfig) {
        mp4AudioSpecificConfig mp4ASC;
        int rc = AudioSpecificConfig(pConfig, configSize, &mp4ASC);
        free (pConfig);
        if (rc >= 0) {
            *samplerate = mp4ASC.samplingFrequency;
            *channels = MP4GetTrackAudioChannels (mp4File, trackId);
//            int64_t duration = MP4ConvertFromTrackDuration (mp4File, trackId, trackDuration, timeScale);
            int samples = MP4GetTrackNumberOfSamples (mp4File, trackId) * 1024 * (*channels);
            trace ("mp4 nsamples=%d, timescale=%d, samplerate=%d\n", samples, timeScale, *samplerate);
            *duration = (float)samples / (*samplerate);

            if (totalsamples) {
                *totalsamples = samples;
            }
            if (mp4track) {
                *mp4track = trackId;
            }
            if (!*pmp4) {
                MP4Close (mp4);
            }
            return 0;
        }
    }
#endif
    if (*pmp4) {
        *pmp4 = NULL;
    }

    if (mp4) {
#if USE_MP4FF
        mp4ff_close (mp4);
#else
        MP4Close (mp4);
#endif
        mp4 = NULL;
    }
    trace ("mp4 track not found, looking for aac stream...\n");

    // not an mp4, try raw aac
#if USE_MP4FF
    deadbeef->rewind (fp);
#endif
    if (parse_aac_stream (fp, samplerate, channels, duration, totalsamples) == -1) {
        trace ("aac stream not found\n");
        return -1;
    }
    trace ("found aac stream\n");
    return 1;
}
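
In both branches of aac_probe the duration is derived from sample counts rather than from the container's stated duration: the number of access units times the AAC frame length (1024, or 960 when frameLengthFlag is set) gives the PCM sample count, and dividing by the sample rate gives seconds. The same arithmetic as a standalone sketch (illustrative helper, not part of deadbeef):

/* Illustrative only: seconds of audio in nPackets AAC access units of
 * frameLength PCM samples each, decoded at sampleRate Hz. */
static float
aac_duration (int64_t nPackets, int frameLength, int sampleRate) {
    if (sampleRate <= 0) {
        return -1;
    }
    return (float)(nPackets * frameLength) / (float)sampleRate;
}
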
Example #8
static char* PrintAudioInfo(
	MP4FileHandle mp4File,
	MP4TrackId trackId)
{
	static const char* mpeg4AudioNames[] = {
		"MPEG-4 AAC main",
		"MPEG-4 AAC LC",
		"MPEG-4 AAC SSR",
		"MPEG-4 AAC LTP",
		"MPEG-4 AAC HE",
		"MPEG-4 AAC Scalable",
		"MPEG-4 TwinVQ",
		"MPEG-4 CELP",
		"MPEG-4 HVXC",
		NULL, NULL,
		"MPEG-4 TTSI",
		"MPEG-4 Main Synthetic",
		"MPEG-4 Wavetable Syn",
		"MPEG-4 General MIDI",
		"MPEG-4 Algo Syn and Audio FX",
		"MPEG-4 ER AAC LC",
		NULL,
		"MPEG-4 ER AAC LTP",
		"MPEG-4 ER AAC Scalable",
		"MPEG-4 ER TwinVQ",
		"MPEG-4 ER BSAC",
		"MPEG-4 ER ACC LD",
		"MPEG-4 ER CELP",
		"MPEG-4 ER HVXC",
		"MPEG-4 ER HILN",
		"MPEG-4 ER Parametric",
		"MPEG-4 SSC",
		"MPEG-4 PS",
		"MPEG-4 MPEG Surround",
		NULL,
		"MPEG-4 Layer-1",
		"MPEG-4 Layer-2",
		"MPEG-4 Layer-3",
		"MPEG-4 DST",
		"MPEG-4 Audio Lossless",
		"MPEG-4 SLS",
		"MPEG-4 SLS non-core", 
	};

	static const u_int8_t mpegAudioTypes[] = {
		MP4_MPEG2_AAC_MAIN_AUDIO_TYPE,	// 0x66
		MP4_MPEG2_AAC_LC_AUDIO_TYPE,	// 0x67
		MP4_MPEG2_AAC_SSR_AUDIO_TYPE,	// 0x68
		MP4_MPEG2_AUDIO_TYPE,			// 0x69
		MP4_MPEG1_AUDIO_TYPE,			// 0x6B
		// private types
		MP4_PCM16_LITTLE_ENDIAN_AUDIO_TYPE,
		MP4_VORBIS_AUDIO_TYPE,
		MP4_ALAW_AUDIO_TYPE,
		MP4_ULAW_AUDIO_TYPE,
		MP4_G723_AUDIO_TYPE,
		MP4_PCM16_BIG_ENDIAN_AUDIO_TYPE,
	};
	static const char* mpegAudioNames[] = {
		"MPEG-2 AAC Main",
		"MPEG-2 AAC LC",
		"MPEG-2 AAC SSR",
		"MPEG-2 Audio (13818-3)",
		"MPEG-1 Audio (11172-3)",
		// private types
		"PCM16 (little endian)",
		"Vorbis",
		"G.711 aLaw",
		"G.711 uLaw",
		"G.723.1",
		"PCM16 (big endian)",
	};
	u_int8_t numMpegAudioTypes =
		sizeof(mpegAudioTypes) / sizeof(u_int8_t);

	const char* typeName = "Unknown";
	bool foundType = false;
	u_int8_t type = 0;
	const char *media_data_name;

	media_data_name = MP4GetTrackMediaDataName(mp4File, trackId);

	if (media_data_name == NULL) {
	  typeName = "Unknown - no media data name";
	} else if (strcasecmp(media_data_name, "samr") == 0) {
	    typeName = "AMR";
	    foundType = true;
	} else if (strcasecmp(media_data_name, "sawb") == 0) {
	    typeName = "AMR-WB";
	    foundType = true;
	} else if (strcasecmp(media_data_name, "mp4a") == 0) {
	    
	  type = MP4GetTrackEsdsObjectTypeId(mp4File, trackId);
	  switch (type) {
	  case MP4_INVALID_AUDIO_TYPE:
	    typeName = "AAC from .mov";
	    foundType = true;
	    break;
	  case MP4_MPEG4_AUDIO_TYPE:  {
	
	    type = MP4GetTrackAudioMpeg4Type(mp4File, trackId);
	    if (type == MP4_MPEG4_INVALID_AUDIO_TYPE ||
		type > NUM_ELEMENTS_IN_ARRAY(mpeg4AudioNames) || 
		mpeg4AudioNames[type - 1] == NULL) {
	      typeName = "MPEG-4 Unknown Profile";
	    } else {
	      typeName = mpeg4AudioNames[type - 1];
	      foundType = true;
	    }
	    break;
	  }
	    // fall through
	  default:
	    for (u_int8_t i = 0; i < numMpegAudioTypes; i++) {
	      if (type == mpegAudioTypes[i]) {
		typeName = mpegAudioNames[i];
		foundType = true;
		break;
	      }
	    }
	  }
	} else {
	  typeName = media_data_name;
	  foundType = true;
	}

	u_int32_t timeScale =
		MP4GetTrackTimeScale(mp4File, trackId);

	MP4Duration trackDuration =
		MP4GetTrackDuration(mp4File, trackId);

	double msDuration =
		UINT64_TO_DOUBLE(MP4ConvertFromTrackDuration(mp4File, trackId,
			trackDuration, MP4_MSECS_TIME_SCALE));

	u_int32_t avgBitRate =
		MP4GetTrackBitRate(mp4File, trackId);

	char *sInfo = (char*)MP4Malloc(256);

	// type duration avgBitrate samplingFrequency
	if (foundType)
	  snprintf(sInfo, 256, 
		  "%u\taudio\t%s%s, %.3f secs, %u kbps, %u Hz\n",
		  trackId,
		  MP4IsIsmaCrypMediaTrack(mp4File, trackId) ? "enca - " : "",
		  typeName,
		  msDuration / 1000.0,
		  (avgBitRate + 500) / 1000,
		  timeScale);
	else
	  snprintf(sInfo, 256,
		  "%u\taudio\t%s%s(%u), %.3f secs, %u kbps, %u Hz\n",
		  trackId,
		  MP4IsIsmaCrypMediaTrack(mp4File, trackId) ? "enca - " : "",
		  typeName,
		  type,
		  msDuration / 1000.0,
		  (avgBitRate + 500) / 1000,
		  timeScale);

	return sInfo;
}
Example #9
static char* PrintVideoInfo(
	MP4FileHandle mp4File,
	MP4TrackId trackId)
{

	static const u_int8_t mpegVideoTypes[] = {
		MP4_MPEG2_SIMPLE_VIDEO_TYPE,	// 0x60
		MP4_MPEG2_MAIN_VIDEO_TYPE,		// 0x61
		MP4_MPEG2_SNR_VIDEO_TYPE,		// 0x62
		MP4_MPEG2_SPATIAL_VIDEO_TYPE,	// 0x63
		MP4_MPEG2_HIGH_VIDEO_TYPE,		// 0x64
		MP4_MPEG2_442_VIDEO_TYPE,		// 0x65
		MP4_MPEG1_VIDEO_TYPE,			// 0x6A
		MP4_JPEG_VIDEO_TYPE,			// 0x6C
		MP4_YUV12_VIDEO_TYPE,
		MP4_H263_VIDEO_TYPE,
		MP4_H261_VIDEO_TYPE,
	};
	static const char* mpegVideoNames[] = {
		"MPEG-2 Simple",
		"MPEG-2 Main",
		"MPEG-2 SNR",
		"MPEG-2 Spatial",
		"MPEG-2 High",
		"MPEG-2 4:2:2",
		"MPEG-1",
		"JPEG",
		"YUV12",
		"H.263",
		"H.261",
	};
	u_int8_t numMpegVideoTypes =
		sizeof(mpegVideoTypes) / sizeof(u_int8_t);
	bool foundTypeName = false;
	const char* typeName = "Unknown";

	const char *media_data_name;
	char originalFormat[8];
	char  oformatbuffer[32];
	originalFormat[0] = 0;
	*oformatbuffer = 0;
	uint8_t type = 0;
	
	media_data_name = MP4GetTrackMediaDataName(mp4File, trackId);
	// encv 264b
	if (media_data_name != NULL && strcasecmp(media_data_name, "encv") == 0) {
	  if (MP4GetTrackMediaDataOriginalFormat(mp4File, 
						 trackId, 
						 originalFormat, 
						 sizeof(originalFormat)) == false)
	      media_data_name = NULL;
	      
	}
  
	char  typebuffer[80];
	if (media_data_name == NULL) {
	  typeName = "Unknown - no media data name";
	  foundTypeName = true;
	} else if ((strcasecmp(media_data_name, "avc1") == 0) ||
	  	(strcasecmp(originalFormat, "264b") == 0)) {
	  // avc
	  uint8_t profile, level;
	  char profileb[20], levelb[20];
	  if (MP4GetTrackH264ProfileLevel(mp4File, trackId, 
					  &profile, &level)) {
	    if (profile == 66) {
	      strcpy(profileb, "Baseline");
	    } else if (profile == 77) {
	      strcpy(profileb, "Main");
	    } else if (profile == 88) {
	      strcpy(profileb, "Extended");
	    } else if (profile == 100) {
	      strcpy(profileb, "High");
	    } else if (profile == 110) {
	      strcpy(profileb, "High 10");
	    } else if (profile == 122) {
	      strcpy(profileb, "High 4:2:2");
	    } else if (profile == 144) {
	      strcpy(profileb, "High 4:4:4");
	    } else {
	      snprintf(profileb, 20, "Unknown Profile %x", profile);
	    } 
	    switch (level) {
	    case 10: case 20: case 30: case 40: case 50:
	      snprintf(levelb, 20, "%u", level / 10);
	      break;
	    case 11: case 12: case 13:
	    case 21: case 22:
	    case 31: case 32:
	    case 41: case 42:
	    case 51:
	      snprintf(levelb, 20, "%u.%u", level / 10, level % 10);
	      break;
	    default:
	      snprintf(levelb, 20, "unknown level %x", level);
	      break;
	    }
	    if (originalFormat != NULL && originalFormat[0] != '\0') 
	      snprintf(oformatbuffer, 32, "(%s) ", originalFormat);
	    snprintf(typebuffer, sizeof(typebuffer), "H264 %s%s@%s", 
		    oformatbuffer, profileb, levelb);
	    typeName = typebuffer;
	  } else {
	    typeName = "H.264 - profile/level error";
	  }
	  foundTypeName = true;
	} else if (strcasecmp(media_data_name, "s263") == 0) {
	  // 3gp h.263
	  typeName = "H.263";
	  foundTypeName = true;
	} else if ((strcasecmp(media_data_name, "mp4v") == 0) ||
		   (strcasecmp(media_data_name, "encv") == 0)) {
	  // note encv might need its own field eventually.
	  type = MP4GetTrackEsdsObjectTypeId(mp4File, trackId);
	  if (type == MP4_MPEG4_VIDEO_TYPE) {
	    type = MP4GetVideoProfileLevel(mp4File, trackId);
	    typeName = Mpeg4VisualProfileName(type);
	    if (typeName == NULL) {
	      typeName = "MPEG-4 Unknown Profile";
	    } else {
	      foundTypeName = true;
	    }
	  } else {
	    for (u_int8_t i = 0; i < numMpegVideoTypes; i++) {
	      if (type == mpegVideoTypes[i]) {
		typeName = mpegVideoNames[i];
		foundTypeName = true;
		break;
	      }
	    }
	  }
	} else {
	  typeName = media_data_name;
	  foundTypeName = true; // we don't have a type value to display
	}

	MP4Duration trackDuration =
		MP4GetTrackDuration(mp4File, trackId);

	double msDuration =
		UINT64_TO_DOUBLE(MP4ConvertFromTrackDuration(mp4File, trackId,
			trackDuration, MP4_MSECS_TIME_SCALE));

	u_int32_t avgBitRate =
		MP4GetTrackBitRate(mp4File, trackId);

	// Note not all mp4 implementations set width and height correctly
	// The real answer can be buried inside the ES configuration info
	u_int16_t width = MP4GetTrackVideoWidth(mp4File, trackId);

	u_int16_t height = MP4GetTrackVideoHeight(mp4File, trackId);

	double fps = MP4GetTrackVideoFrameRate(mp4File, trackId);

	char *sInfo = (char*)MP4Malloc(256);

	// type duration avgBitrate frameSize frameRate
	if (foundTypeName) {
	  snprintf(sInfo, 256,
		  "%u\tvideo\t%s%s, %.3f secs, %u kbps, %ux%u @ %f fps\n",
		  trackId,
		  MP4IsIsmaCrypMediaTrack(mp4File, trackId) ? "encv - " : "",
		  typeName,
		  msDuration / 1000.0,
		  (avgBitRate + 500) / 1000,
		  width,
		  height,
		  fps
		  );
	} else {
	  snprintf(sInfo, 256,
		  "%u\tvideo\t%s(%u), %.3f secs, %u kbps, %ux%u @ %f fps\n",
		  trackId,
		  typeName,
		  type, 
		  msDuration / 1000.0,
		  (avgBitRate + 500) / 1000,
		  width,
		  height,
		  fps
		  );
	}

	return sInfo;
}
Example #10
static void *mp4Decode(void *args)
{
  MP4FileHandle mp4file;

  pthread_mutex_lock(&mutex);
  seekPosition = -1;
  bPlaying = TRUE;
  if(!(mp4file = MP4Read(args, 0))){
    mp4cfg.file_type = FILE_AAC;
    MP4Close(mp4file);
  }else{
    mp4cfg.file_type = FILE_MP4;
  }

  if(mp4cfg.file_type == FILE_MP4){
    // We are reading an MP4 file
    gint		mp4track;

    if((mp4track = getAACTrack(mp4file)) < 0){
      //TODO: check here for other audio formats.....
      g_print("Unsupported Audio track type\n");
      g_free(args);
      MP4Close(mp4file);
      bPlaying = FALSE;
      pthread_mutex_unlock(&mutex);
      pthread_exit(NULL);
    }else{
      faacDecHandle	decoder;
      unsigned char	*buffer	= NULL;
      guint		bufferSize = 0;
      gulong		samplerate;
      guchar		channels;
      guint		avgBitrate;
      MP4Duration	duration;
      gulong		msDuration;
      MP4SampleId	numSamples;
      MP4SampleId	sampleID = 1;

      decoder = faacDecOpen();
      MP4GetTrackESConfiguration(mp4file, mp4track, &buffer, &bufferSize);
      if(!buffer){
	g_free(args);
	faacDecClose(decoder);
	MP4Close(mp4file);
	bPlaying = FALSE;
	pthread_mutex_unlock(&mutex);
	pthread_exit(NULL);
      }
      if(faacDecInit2(decoder, buffer, bufferSize, &samplerate, &channels)<0){
	g_free(args);
	faacDecClose(decoder);
	MP4Close(mp4file);
	bPlaying = FALSE;
	pthread_mutex_unlock(&mutex);
	pthread_exit(NULL);
      }
      g_free(buffer);
      if(channels == 0){
	g_print("Number of Channels not supported\n");
	g_free(args);
	faacDecClose(decoder);
	MP4Close(mp4file);
	bPlaying = FALSE;
	pthread_mutex_unlock(&mutex);
	pthread_exit(NULL);
      }
      duration = MP4GetTrackDuration(mp4file, mp4track);
      msDuration = MP4ConvertFromTrackDuration(mp4file, mp4track, duration,
					       MP4_MSECS_TIME_SCALE);
      numSamples = MP4GetTrackNumberOfSamples(mp4file, mp4track);
      mp4_ip.output->open_audio(FMT_S16_NE, samplerate, channels);
      mp4_ip.output->flush(0);
      mp4_ip.set_info(args, msDuration, -1, samplerate/1000, channels);
      g_print("MP4 - %d channels @ %d Hz\n", channels, samplerate);

      while(bPlaying){
	void*			sampleBuffer;
	faacDecFrameInfo	frameInfo;    
	gint			rc;

	if(seekPosition!=-1){
	  duration = MP4ConvertToTrackDuration(mp4file,
					       mp4track,
					       seekPosition*1000,
					       MP4_MSECS_TIME_SCALE);
	  sampleID = MP4GetSampleIdFromTime(mp4file, mp4track, duration, 0);
	  mp4_ip.output->flush(seekPosition*1000);
	  seekPosition = -1;
	}
	buffer=NULL;
	bufferSize=0;
	if(sampleID > numSamples){
	  mp4_ip.output->close_audio();
	  g_free(args);
	  faacDecClose(decoder);
	  MP4Close(mp4file);
	  bPlaying = FALSE;
	  pthread_mutex_unlock(&mutex);
	  pthread_exit(NULL);
	}
	rc = MP4ReadSample(mp4file, mp4track, sampleID++, &buffer, &bufferSize,
			   NULL, NULL, NULL, NULL);
	//g_print("%d/%d\n", sampleID-1, numSamples);
	if((rc==0) || (buffer== NULL)){
	  g_print("MP4: read error\n");
	  sampleBuffer = NULL;
	  sampleID=0;
	  mp4_ip.output->buffer_free();
	  mp4_ip.output->close_audio();
	  g_free(args);
	  faacDecClose(decoder);
	  MP4Close(mp4file);
	  bPlaying = FALSE;
	  pthread_mutex_unlock(&mutex);
	  pthread_exit(NULL);
	}else{
	  sampleBuffer = faacDecDecode(decoder, &frameInfo, buffer, bufferSize);
	  if(frameInfo.error > 0){
	    g_print("MP4: %s\n",
		    faacDecGetErrorMessage(frameInfo.error));
	    mp4_ip.output->close_audio();
	    g_free(args);
	    faacDecClose(decoder);
	    MP4Close(mp4file);
	    bPlaying = FALSE;
	    pthread_mutex_unlock(&mutex);
	    pthread_exit(NULL);
	  }
	  if(buffer){
	    g_free(buffer); buffer=NULL; bufferSize=0;
	  }
	  while(bPlaying && mp4_ip.output->buffer_free()<frameInfo.samples<<1)
	    xmms_usleep(30000);
	}
	mp4_ip.add_vis_pcm(mp4_ip.output->written_time(),
			   FMT_S16_NE,
			   channels,
			   frameInfo.samples<<1,
			   sampleBuffer);
	mp4_ip.output->write_audio(sampleBuffer, frameInfo.samples<<1);
      }
      while(bPlaying && mp4_ip.output->buffer_free()){
	xmms_usleep(10000);
      }
      mp4_ip.output->close_audio();
      g_free(args);
      faacDecClose(decoder);
      MP4Close(mp4file);
      bPlaying = FALSE;
      pthread_mutex_unlock(&mutex);
      pthread_exit(NULL);
    }
  } else{
    // WE ARE READING AN AAC FILE
    FILE		*file = NULL;
    faacDecHandle	decoder = 0;
    guchar		*buffer = 0;
    gulong		bufferconsumed = 0;
    gulong		samplerate = 0;
    guchar		channels;
    gulong		buffervalid = 0;
    TitleInput*		input;
    gchar		*temp = g_strdup(args);
    gchar		*ext  = strrchr(temp, '.');
    gchar		*xmmstitle = NULL;
    faacDecConfigurationPtr config;

    if((file = fopen(args, "rb")) == 0){
      g_print("AAC: can't find file %s\n", args);
      bPlaying = FALSE;
      pthread_mutex_unlock(&mutex);
      pthread_exit(NULL);
    }
    if((decoder = faacDecOpen()) == NULL){
      g_print("AAC: Open Decoder Error\n");
      fclose(file);
      bPlaying = FALSE;
      pthread_mutex_unlock(&mutex);
      pthread_exit(NULL);
    }
    config = faacDecGetCurrentConfiguration(decoder);
    config->useOldADTSFormat = 0;
    faacDecSetConfiguration(decoder, config);
    if((buffer = g_malloc(BUFFER_SIZE)) == NULL){
      g_print("AAC: error g_malloc\n");
      fclose(file);
      bPlaying = FALSE;
      faacDecClose(decoder);
      pthread_mutex_unlock(&mutex);
      pthread_exit(NULL);
    }
    if((buffervalid = fread(buffer, 1, BUFFER_SIZE, file))==0){
      g_print("AAC: Error reading file\n");
      g_free(buffer);
      fclose(file);
      bPlaying = FALSE;
      faacDecClose(decoder);
      pthread_mutex_unlock(&mutex);
      pthread_exit(NULL);
    }
    XMMS_NEW_TITLEINPUT(input);
    input->file_name = g_basename(temp);
    input->file_ext = ext ? ext+1 : NULL;
    input->file_path = temp;
    if(!strncmp(buffer, "ID3", 3)){
      gint size = 0;

      fseek(file, 0, SEEK_SET);
      size = (buffer[6]<<21) | (buffer[7]<<14) | (buffer[8]<<7) | buffer[9];
      size+=10;
      fread(buffer, 1, size, file);
      buffervalid = fread(buffer, 1, BUFFER_SIZE, file);
    }
    xmmstitle = xmms_get_titlestring(xmms_get_gentitle_format(), input);
    if(xmmstitle == NULL)
      xmmstitle = g_strdup(input->file_name);
    if(temp) g_free(temp);
    if(input->performer) g_free(input->performer);
    if(input->album_name) g_free(input->album_name);
    if(input->track_name) g_free(input->track_name);
    if(input->genre) g_free(input->genre);
    g_free(input);
    bufferconsumed = faacDecInit(decoder,
				 buffer,
				 buffervalid,
				 &samplerate,
				 &channels);
    if(mp4_ip.output->open_audio(FMT_S16_NE,samplerate,channels) == FALSE){
      g_print("AAC: Output Error\n");
      g_free(buffer); buffer=0;
      faacDecClose(decoder);
      fclose(file);
      mp4_ip.output->close_audio();
      /*
      if(positionTable){
	g_free(positionTable); positionTable=0;
      }
      */
      g_free(xmmstitle);
      bPlaying = FALSE;
      pthread_mutex_unlock(&mutex);
      pthread_exit(NULL);
    }
    //if(bSeek){
    //mp4_ip.set_info(xmmstitle, lenght*1000, -1, samplerate, channels);
      //}else{
    mp4_ip.set_info(xmmstitle, -1, -1, samplerate, channels);
      //}
    mp4_ip.output->flush(0);

    while(bPlaying && buffervalid > 0){
      faacDecFrameInfo	finfo;
      unsigned long	samplesdecoded;
      char*		sample_buffer = NULL;
      /*
	if(bSeek && seekPosition!=-1){
	fseek(file, positionTable[seekPosition], SEEK_SET);
	bufferconsumed=0;
	buffervalid = fread(buffer, 1, BUFFER_SIZE, file);
	aac_ip.output->flush(seekPosition*1000);
	seekPosition=-1;
	}
      */
      if(bufferconsumed > 0){
	memmove(buffer, &buffer[bufferconsumed], buffervalid-bufferconsumed);
	buffervalid -= bufferconsumed;
	buffervalid += fread(&buffer[buffervalid], 1,
			     BUFFER_SIZE-buffervalid, file);
	bufferconsumed = 0;
      }
      sample_buffer = faacDecDecode(decoder, &finfo, buffer, buffervalid);
      if(finfo.error){
	config = faacDecGetCurrentConfiguration(decoder);
	if(config->useOldADTSFormat != 1){
	  faacDecClose(decoder);
	  decoder = faacDecOpen();
	  config = faacDecGetCurrentConfiguration(decoder);
	  config->useOldADTSFormat = 1;
	  faacDecSetConfiguration(decoder, config);
	  finfo.bytesconsumed=0;
	  finfo.samples = 0;
	  faacDecInit(decoder,
		      buffer,
		      buffervalid,
		      &samplerate,
		      &channels);
	}else{
	  g_print("FAAD2 Warning %s\n", faacDecGetErrorMessage(finfo.error));
	  buffervalid = 0;
	}
      }
      bufferconsumed += finfo.bytesconsumed;
      samplesdecoded = finfo.samples;
      if((samplesdecoded<=0) && !sample_buffer){
	g_print("AAC: error sample decoding\n");
	continue;
      }
      while(bPlaying && mp4_ip.output->buffer_free() < (samplesdecoded<<1)){
	xmms_usleep(10000);
      }
      mp4_ip.add_vis_pcm(mp4_ip.output->written_time(),
			 FMT_S16_LE, channels,
			 samplesdecoded<<1, sample_buffer);
      mp4_ip.output->write_audio(sample_buffer, samplesdecoded<<1);
    }
    while(bPlaying && mp4_ip.output->buffer_playing()){
      xmms_usleep(10000);
    }
    mp4_ip.output->buffer_free();
    mp4_ip.output->close_audio();
    bPlaying = FALSE;
    g_free(buffer);
    faacDecClose(decoder);
    g_free(xmmstitle);
    fclose(file);
    seekPosition = -1;
    /*
    if(positionTable){
      g_free(positionTable); positionTable=0;
    }
    */
    bPlaying = FALSE;
    pthread_mutex_unlock(&mutex);
    pthread_exit(NULL);
    
  }
}
Example #11
File: aac.c Project: Koss64/deadbeef
// returns -1 for error, 0 for mp4, 1 for aac
int
aac_probe (DB_FILE *fp, const char *fname, MP4FILE_CB *cb, float *duration, int *samplerate, int *channels, int *totalsamples, int *mp4track, MP4FILE *pmp4) {
    // try mp4

    if (mp4track) {
        *mp4track = -1;
    }
    if (*pmp4) {
        *pmp4 = NULL;
    }
    *duration = -1;
#ifdef USE_MP4FF
    mp4ff_t *mp4 = mp4ff_open_read (cb);
#else
    MP4FileHandle mp4 = MP4ReadProvider (fname, 0, cb);
#endif
    if (!mp4) {
        trace ("not an mp4 file\n");
        return -1;
    }
    if (pmp4) {
        *pmp4 = mp4;
    }
#ifdef USE_MP4FF
    int ntracks = mp4ff_total_tracks (mp4);
    if (ntracks > 0) {
        trace ("m4a container detected, ntracks=%d\n", ntracks);
        int i = -1;
        trace ("looking for mp4 data...\n");
        int sr = -1;
        for (i = 0; i < ntracks; i++) {
            unsigned char*  buff = 0;
            unsigned int    buff_size = 0;
            mp4AudioSpecificConfig mp4ASC;
            mp4ff_get_decoder_config(mp4, i, &buff, &buff_size);
            if(buff){
                int rc = AudioSpecificConfig(buff, buff_size, &mp4ASC);
                sr = mp4ASC.samplingFrequency;
                free(buff);
                if(rc < 0)
                    continue;
                break;
            }
        }

        if (i != ntracks) 
        {
            trace ("mp4 track: %d\n", i);
            if (sr != -1) {
                *samplerate = sr;
            }
            else {
                *samplerate = mp4ff_get_sample_rate (mp4, i);
            }
            *channels = mp4ff_get_channel_count (mp4, i);
            int samples = mp4ff_num_samples(mp4, i) * 1024;
            samples = (int64_t)samples * (*samplerate) / mp4ff_time_scale (mp4, i);

            trace ("mp4 nsamples=%d, samplerate=%d, timescale=%d, duration=%lld\n", samples, *samplerate, mp4ff_time_scale(mp4, i), mp4ff_get_track_duration(mp4, i));
            *duration = (float)samples / (*samplerate);

            if (totalsamples) {
                *totalsamples = samples;
            }
            if (mp4track) {
                *mp4track = i;
            }
            if (!*pmp4) {
                mp4ff_close (mp4);
            }
            return 0;
        }
    }
#else
    MP4FileHandle mp4File = mp4;
    MP4TrackId trackId = MP4FindTrackId(mp4File, 0, "audio", 0);
    trace ("trackid: %d\n", trackId);
    uint32_t timeScale = MP4GetTrackTimeScale(mp4File, trackId);
    MP4Duration trackDuration = MP4GetTrackDuration(mp4File, trackId);
    MP4SampleId numSamples = MP4GetTrackNumberOfSamples(mp4File, trackId);
    u_int8_t* pConfig;
    uint32_t configSize = 0;
    bool res = MP4GetTrackESConfiguration(mp4File, trackId, &pConfig, &configSize);
    if (res && pConfig) {
        mp4AudioSpecificConfig mp4ASC;
        int rc = AudioSpecificConfig(pConfig, configSize, &mp4ASC);
        free (pConfig);
        if (rc >= 0) {
            *samplerate = mp4ASC.samplingFrequency;
            *channels = MP4GetTrackAudioChannels (mp4File, trackId);
//            int64_t duration = MP4ConvertFromTrackDuration (mp4File, trackId, trackDuration, timeScale);
            int samples = MP4GetTrackNumberOfSamples (mp4File, trackId) * 1024 * (*channels);
            trace ("mp4 nsamples=%d, timescale=%d, samplerate=%d\n", samples, timeScale, *samplerate);
            *duration = (float)samples / (*samplerate);

            if (totalsamples) {
                *totalsamples = samples;
            }
            if (mp4track) {
                *mp4track = trackId;
            }
            if (!*pmp4) {
                MP4Close (mp4);
            }
            return 0;
        }
    }
#endif
    if (*pmp4) {
        *pmp4 = NULL;
    }

    if (mp4) {
#if USE_MP4FF
        mp4ff_close (mp4);
#else
        MP4Close (mp4);
#endif
        mp4 = NULL;
    }
    trace ("mp4 track not found, looking for aac stream...\n");

    // not an mp4, try raw aac
#if USE_MP4FF
    deadbeef->rewind (fp);
#endif
    if (parse_aac_stream (fp, samplerate, channels, duration, totalsamples) == -1) {
        trace ("aac stream not found\n");
        return -1;
    }
    trace ("found aac stream\n");
    return 1;
}
Example #12
bool MP4Metadata::ReadMetadata(CFErrorRef *error)
{
	// Start from scratch
	CFDictionaryRemoveAllValues(mMetadata);
	CFDictionaryRemoveAllValues(mChangedMetadata);

	UInt8 buf [PATH_MAX];
	if(!CFURLGetFileSystemRepresentation(mURL, FALSE, buf, PATH_MAX))
		return false;
	
	// Open the file for reading
	MP4FileHandle file = MP4Read(reinterpret_cast<const char *>(buf));
	
	if(MP4_INVALID_FILE_HANDLE == file) {
		if(error) {
			CFMutableDictionaryRef errorDictionary = CFDictionaryCreateMutable(kCFAllocatorDefault, 
																			   32,
																			   &kCFTypeDictionaryKeyCallBacks,
																			   &kCFTypeDictionaryValueCallBacks);
			
			CFStringRef displayName = CreateDisplayNameForURL(mURL);
			CFStringRef errorString = CFStringCreateWithFormat(kCFAllocatorDefault, 
															   NULL, 
															   CFCopyLocalizedString(CFSTR("The file “%@” is not a valid MPEG-4 file."), ""), 
															   displayName);
			
			CFDictionarySetValue(errorDictionary, 
								 kCFErrorLocalizedDescriptionKey, 
								 errorString);
			
			CFDictionarySetValue(errorDictionary, 
								 kCFErrorLocalizedFailureReasonKey, 
								 CFCopyLocalizedString(CFSTR("Not an MPEG-4 file"), ""));
			
			CFDictionarySetValue(errorDictionary, 
								 kCFErrorLocalizedRecoverySuggestionKey, 
								 CFCopyLocalizedString(CFSTR("The file's extension may not match the file's type."), ""));
			
			CFRelease(errorString), errorString = NULL;
			CFRelease(displayName), displayName = NULL;
			
			*error = CFErrorCreate(kCFAllocatorDefault, 
								   AudioMetadataErrorDomain, 
								   AudioMetadataFileFormatNotRecognizedError, 
								   errorDictionary);
			
			CFRelease(errorDictionary), errorDictionary = NULL;				
		}
		
		return false;
	}

	// Read the properties
	if(0 < MP4GetNumberOfTracks(file)) {
		// Should be type 'soun', media data name 'mp4a'
		MP4TrackId trackID = MP4FindTrackId(file, 0);

		// Verify this is an MPEG-4 audio file
		if(MP4_INVALID_TRACK_ID == trackID || strncmp("soun", MP4GetTrackType(file, trackID), 4)) {
			MP4Close(file), file = NULL;
			
			if(error) {
				CFMutableDictionaryRef errorDictionary = CFDictionaryCreateMutable(kCFAllocatorDefault, 
																				   32,
																				   &kCFTypeDictionaryKeyCallBacks,
																				   &kCFTypeDictionaryValueCallBacks);
				
				CFStringRef displayName = CreateDisplayNameForURL(mURL);
				CFStringRef errorString = CFStringCreateWithFormat(kCFAllocatorDefault, 
																   NULL, 
																   CFCopyLocalizedString(CFSTR("The file “%@” is not a valid MPEG-4 file."), ""), 
																   displayName);
				
				CFDictionarySetValue(errorDictionary, 
									 kCFErrorLocalizedDescriptionKey, 
									 errorString);
				
				CFDictionarySetValue(errorDictionary, 
									 kCFErrorLocalizedFailureReasonKey, 
									 CFCopyLocalizedString(CFSTR("Not an MPEG-4 file"), ""));
				
				CFDictionarySetValue(errorDictionary, 
									 kCFErrorLocalizedRecoverySuggestionKey, 
									 CFCopyLocalizedString(CFSTR("The file's extension may not match the file's type."), ""));
				
				CFRelease(errorString), errorString = NULL;
				CFRelease(displayName), displayName = NULL;
				
				*error = CFErrorCreate(kCFAllocatorDefault, 
									   AudioMetadataErrorDomain, 
									   AudioMetadataFileFormatNotSupportedError, 
									   errorDictionary);
				
				CFRelease(errorDictionary), errorDictionary = NULL;				
			}
			
			return false;
		}
		
		MP4Duration mp4Duration = MP4GetTrackDuration(file, trackID);
		uint32_t mp4TimeScale = MP4GetTrackTimeScale(file, trackID);
		
		CFNumberRef totalFrames = CFNumberCreate(kCFAllocatorDefault, kCFNumberLongLongType, &mp4Duration);
		CFDictionarySetValue(mMetadata, kPropertiesTotalFramesKey, totalFrames);
		CFRelease(totalFrames), totalFrames = NULL;
		
		CFNumberRef sampleRate = CFNumberCreate(kCFAllocatorDefault, kCFNumberIntType, &mp4TimeScale);
		CFDictionarySetValue(mMetadata, kPropertiesSampleRateKey, sampleRate);
		CFRelease(sampleRate), sampleRate = NULL;
		
		double length = static_cast<double>(mp4Duration) / static_cast<double>(mp4TimeScale);
		CFNumberRef duration = CFNumberCreate(kCFAllocatorDefault, kCFNumberDoubleType, &length);
		CFDictionarySetValue(mMetadata, kPropertiesDurationKey, duration);
		CFRelease(duration), duration = NULL;

		// "mdia.minf.stbl.stsd.*[0].channels"
		int channels = MP4GetTrackAudioChannels(file, trackID);
		CFNumberRef channelsPerFrame = CFNumberCreate(kCFAllocatorDefault, kCFNumberIntType, &channels);
		CFDictionaryAddValue(mMetadata, kPropertiesChannelsPerFrameKey, channelsPerFrame);
		CFRelease(channelsPerFrame), channelsPerFrame = NULL;

		// ALAC files
		if(MP4HaveTrackAtom(file, trackID, "mdia.minf.stbl.stsd.alac")) {
			CFDictionarySetValue(mMetadata, kPropertiesFormatNameKey, CFSTR("Apple Lossless"));
			
			uint64_t sampleSize;
			uint8_t *decoderConfig;
			uint32_t decoderConfigSize;
			if(MP4GetTrackBytesProperty(file, trackID, "mdia.minf.stbl.stsd.alac.alac.decoderConfig", &decoderConfig, &decoderConfigSize) && 28 <= decoderConfigSize) {
				// The ALAC magic cookie seems to have the following layout (28 bytes, BE):
				// Byte 10: Sample size
				// Bytes 25-28: Sample rate
				CFNumberRef bitsPerChannel = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt8Type, decoderConfig + 9);
				CFDictionaryAddValue(mMetadata, kPropertiesBitsPerChannelKey, bitsPerChannel);
				CFRelease(bitsPerChannel), bitsPerChannel = NULL;

				// use the sample-size byte read from the cookie above (sampleSize is not set in this branch)
				double losslessBitrate = static_cast<double>(mp4TimeScale * channels * decoderConfig[9]) / 1000;
				CFNumberRef bitrate = CFNumberCreate(kCFAllocatorDefault, kCFNumberDoubleType, &losslessBitrate);
				CFDictionarySetValue(mMetadata, kPropertiesBitrateKey, bitrate);
				CFRelease(bitrate), bitrate = NULL;

				free(decoderConfig), decoderConfig = NULL;
			}			
			else if(MP4GetTrackIntegerProperty(file, trackID, "mdia.minf.stbl.stsd.alac.sampleSize", &sampleSize)) {
				CFNumberRef bitsPerChannel = CFNumberCreate(kCFAllocatorDefault, kCFNumberLongLongType, &sampleSize);
				CFDictionaryAddValue(mMetadata, kPropertiesBitsPerChannelKey, bitsPerChannel);
				CFRelease(bitsPerChannel), bitsPerChannel = NULL;

				double losslessBitrate = static_cast<double>(mp4TimeScale * channels * sampleSize) / 1000;
				CFNumberRef bitrate = CFNumberCreate(kCFAllocatorDefault, kCFNumberDoubleType, &losslessBitrate);
				CFDictionarySetValue(mMetadata, kPropertiesBitrateKey, bitrate);
				CFRelease(bitrate), bitrate = NULL;
			}
		}

		// AAC files
		if(MP4HaveTrackAtom(file, trackID, "mdia.minf.stbl.stsd.mp4a")) {
			CFDictionarySetValue(mMetadata, kPropertiesFormatNameKey, CFSTR("AAC"));

			// "mdia.minf.stbl.stsd.*.esds.decConfigDescr.avgBitrate"
			uint32_t trackBitrate = MP4GetTrackBitRate(file, trackID) / 1000;
			CFNumberRef bitrate = CFNumberCreate(kCFAllocatorDefault, kCFNumberIntType, &trackBitrate);
			CFDictionaryAddValue(mMetadata, kPropertiesBitrateKey, bitrate);
			CFRelease(bitrate), bitrate = NULL;
			
		}
	}
	// No valid tracks in file
	else {
		MP4Close(file), file = NULL;
		
		if(error) {
			CFMutableDictionaryRef errorDictionary = CFDictionaryCreateMutable(kCFAllocatorDefault, 
																			   32,
																			   &kCFTypeDictionaryKeyCallBacks,
																			   &kCFTypeDictionaryValueCallBacks);
			
			CFStringRef displayName = CreateDisplayNameForURL(mURL);
			CFStringRef errorString = CFStringCreateWithFormat(kCFAllocatorDefault, 
															   NULL, 
															   CFCopyLocalizedString(CFSTR("The file “%@” is not a valid MPEG-4 file."), ""), 
															   displayName);
			
			CFDictionarySetValue(errorDictionary, 
								 kCFErrorLocalizedDescriptionKey, 
								 errorString);
			
			CFDictionarySetValue(errorDictionary, 
								 kCFErrorLocalizedFailureReasonKey, 
								 CFCopyLocalizedString(CFSTR("Not an MPEG-4 file"), ""));
			
			CFDictionarySetValue(errorDictionary, 
								 kCFErrorLocalizedRecoverySuggestionKey, 
								 CFCopyLocalizedString(CFSTR("The file's extension may not match the file's type."), ""));
			
			CFRelease(errorString), errorString = NULL;
			CFRelease(displayName), displayName = NULL;
			
			*error = CFErrorCreate(kCFAllocatorDefault, 
								   AudioMetadataErrorDomain, 
								   AudioMetadataFileFormatNotSupportedError, 
								   errorDictionary);
			
			CFRelease(errorDictionary), errorDictionary = NULL;				
		}
		
		return false;
	}

	// Read the tags
	const MP4Tags *tags = MP4TagsAlloc();

	if(NULL == tags) {
		MP4Close(file), file = NULL;

		if(error)
			*error = CFErrorCreate(kCFAllocatorDefault, kCFErrorDomainPOSIX, ENOMEM, NULL);

		return false;
	}
	
	MP4TagsFetch(tags, file);
	
	// Album title
	if(tags->album) {
		CFStringRef str = CFStringCreateWithCString(kCFAllocatorDefault, tags->album, kCFStringEncodingUTF8);
		CFDictionarySetValue(mMetadata, kMetadataAlbumTitleKey, str);
		CFRelease(str), str = NULL;
	}
	
	// Artist
	if(tags->artist) {
		CFStringRef str = CFStringCreateWithCString(kCFAllocatorDefault, tags->artist, kCFStringEncodingUTF8);
		CFDictionarySetValue(mMetadata, kMetadataArtistKey, str);
		CFRelease(str), str = NULL;
	}
	
	// Album Artist
	if(tags->albumArtist) {
		CFStringRef str = CFStringCreateWithCString(kCFAllocatorDefault, tags->albumArtist, kCFStringEncodingUTF8);
		CFDictionarySetValue(mMetadata, kMetadataAlbumArtistKey, str);
		CFRelease(str), str = NULL;
	}
	
	// Genre
	if(tags->genre) {
		CFStringRef str = CFStringCreateWithCString(kCFAllocatorDefault, tags->genre, kCFStringEncodingUTF8);
		CFDictionarySetValue(mMetadata, kMetadataGenreKey, str);
		CFRelease(str), str = NULL;
	}
	
	// Release date
	if(tags->releaseDate) {
		CFStringRef str = CFStringCreateWithCString(kCFAllocatorDefault, tags->releaseDate, kCFStringEncodingUTF8);
		CFDictionarySetValue(mMetadata, kMetadataReleaseDateKey, str);
		CFRelease(str), str = NULL;
	}
	
	// Composer
	if(tags->composer) {
		CFStringRef str = CFStringCreateWithCString(kCFAllocatorDefault, tags->composer, kCFStringEncodingUTF8);
		CFDictionarySetValue(mMetadata, kMetadataComposerKey, str);
		CFRelease(str), str = NULL;
	}
	
	// Comment
	if(tags->comments) {
		CFStringRef str = CFStringCreateWithCString(kCFAllocatorDefault, tags->comments, kCFStringEncodingUTF8);
		CFDictionarySetValue(mMetadata, kMetadataCommentKey, str);
		CFRelease(str), str = NULL;
	}
	
	// Track title
	if(tags->name) {
		CFStringRef str = CFStringCreateWithCString(kCFAllocatorDefault, tags->name, kCFStringEncodingUTF8);
		CFDictionarySetValue(mMetadata, kMetadataTitleKey, str);
		CFRelease(str), str = NULL;
	}
	
	// Track number
	if(tags->track) {
		if(tags->track->index) {
			CFNumberRef num = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt16Type, &tags->track->index);
			CFDictionarySetValue(mMetadata, kMetadataTrackNumberKey, num);
			CFRelease(num), num = NULL;
		}
		
		if(tags->track->total) {
			CFNumberRef num = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt16Type, &tags->track->total);
			CFDictionarySetValue(mMetadata, kMetadataTrackTotalKey, num);
			CFRelease(num), num = NULL;
		}
	}
	
	// Disc number
	if(tags->disk) {
		if(tags->disk->index) {
			CFNumberRef num = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt16Type, &tags->disk->index);
			CFDictionarySetValue(mMetadata, kMetadataDiscNumberKey, num);
			CFRelease(num), num = NULL;
		}
		
		if(tags->disk->total) {
			CFNumberRef num = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt16Type, &tags->disk->total);
			CFDictionarySetValue(mMetadata, kMetadataDiscTotalKey, num);
			CFRelease(num), num = NULL;
		}
	}
	
	// Compilation
	if(tags->compilation)
		CFDictionarySetValue(mMetadata, kMetadataCompilationKey, *(tags->compilation) ? kCFBooleanTrue : kCFBooleanFalse);
	
	// BPM
	if(tags->tempo) {
		
	}
	
	// Lyrics
	if(tags->lyrics) {
		CFStringRef str = CFStringCreateWithCString(kCFAllocatorDefault, tags->lyrics, kCFStringEncodingUTF8);
		CFDictionarySetValue(mMetadata, kMetadataLyricsKey, str);
		CFRelease(str), str = NULL;
	}
	
	// Album art
	if(tags->artworkCount) {
		for(uint32_t i = 0; i < tags->artworkCount; ++i) {
			CFDataRef data = CFDataCreate(kCFAllocatorDefault, reinterpret_cast<const UInt8 *>(tags->artwork[i].data), tags->artwork[i].size);
			CFDictionarySetValue(mMetadata, kAlbumArtFrontCoverKey, data);
			CFRelease(data), data = NULL;
		}
	}
	
	// ReplayGain
	// Reference loudness
	MP4ItmfItemList *items = MP4ItmfGetItemsByMeaning(file, "com.apple.iTunes", "replaygain_reference_loudness");
	if(NULL != items) {
		float referenceLoudnessValue;
		if(1 <= items->size && 1 <= items->elements[0].dataList.size && sscanf(reinterpret_cast<const char *>(items->elements[0].dataList.elements[0].value), "%f", &referenceLoudnessValue)) {
			CFNumberRef referenceLoudness = CFNumberCreate(kCFAllocatorDefault, kCFNumberFloatType, &referenceLoudnessValue);
			CFDictionaryAddValue(mMetadata, kReplayGainReferenceLoudnessKey, referenceLoudness);
			CFRelease(referenceLoudness), referenceLoudness = NULL;
		}
		
		MP4ItmfItemListFree(items), items = NULL;
	}
	
	// Track gain
	items = MP4ItmfGetItemsByMeaning(file, "com.apple.iTunes", "replaygain_track_gain");
	if(NULL != items) {
		float trackGainValue;
		if(1 <= items->size && 1 <= items->elements[0].dataList.size && sscanf(reinterpret_cast<const char *>(items->elements[0].dataList.elements[0].value), "%f", &trackGainValue)) {
			CFNumberRef trackGain = CFNumberCreate(kCFAllocatorDefault, kCFNumberFloatType, &trackGainValue);
			CFDictionaryAddValue(mMetadata, kReplayGainTrackGainKey, trackGain);
			CFRelease(trackGain), trackGain = NULL;
		}
		
		MP4ItmfItemListFree(items), items = NULL;
	}		
	
	// Track peak
	items = MP4ItmfGetItemsByMeaning(file, "com.apple.iTunes", "replaygain_track_peak");
	if(NULL != items) {
		float trackPeakValue;
		if(1 <= items->size && 1 <= items->elements[0].dataList.size && sscanf(reinterpret_cast<const char *>(items->elements[0].dataList.elements[0].value), "%f", &trackPeakValue)) {
			CFNumberRef trackPeak = CFNumberCreate(kCFAllocatorDefault, kCFNumberFloatType, &trackPeakValue);
			CFDictionaryAddValue(mMetadata, kReplayGainTrackPeakKey, trackPeak);
			CFRelease(trackPeak), trackPeak = NULL;
		}
		
		MP4ItmfItemListFree(items), items = NULL;
	}		
	
	// Album gain
	items = MP4ItmfGetItemsByMeaning(file, "com.apple.iTunes", "replaygain_album_gain");
	if(NULL != items) {
		float albumGainValue;
		if(1 <= items->size && 1 <= items->elements[0].dataList.size && sscanf(reinterpret_cast<const char *>(items->elements[0].dataList.elements[0].value), "%f", &albumGainValue)) {
			CFNumberRef albumGain = CFNumberCreate(kCFAllocatorDefault, kCFNumberFloatType, &albumGainValue);
			CFDictionaryAddValue(mMetadata, kReplayGainAlbumGainKey, albumGain);
			CFRelease(albumGain), albumGain = NULL;
		}
		
		MP4ItmfItemListFree(items), items = NULL;
	}		
	
	// Album peak
	items = MP4ItmfGetItemsByMeaning(file, "com.apple.iTunes", "replaygain_album_peak");
	if(NULL != items) {
		float albumPeakValue;
		if(1 <= items->size && 1 <= items->elements[0].dataList.size && sscanf(reinterpret_cast<const char *>(items->elements[0].dataList.elements[0].value), "%f", &albumPeakValue)) {
			CFNumberRef albumPeak = CFNumberCreate(kCFAllocatorDefault, kCFNumberDoubleType, &albumPeakValue);
			CFDictionaryAddValue(mMetadata, kReplayGainAlbumPeakKey, albumPeak);
			CFRelease(albumPeak), albumPeak = NULL;
		}
		
		MP4ItmfItemListFree(items), items = NULL;
	}

	// Clean up
	MP4TagsFree(tags), tags = NULL;
	MP4Close(file), file = NULL;

	return true;
}
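
ReadMetadata only pulls the sample-size byte out of the ALAC magic cookie; under the 28-byte big-endian layout described in its comment, the sample rate could be read from the same buffer roughly as below (illustrative sketch, not part of the original source):

// Illustrative only: bytes 25-28 (1-based) of the ALAC decoderConfig blob hold a
// big-endian 32-bit sample rate, per the layout noted in the comment above.
static uint32_t ALACCookieSampleRate(const uint8_t *decoderConfig, uint32_t decoderConfigSize)
{
	if(28 > decoderConfigSize)
		return 0;
	return (static_cast<uint32_t>(decoderConfig[24]) << 24) |
	       (static_cast<uint32_t>(decoderConfig[25]) << 16) |
	       (static_cast<uint32_t>(decoderConfig[26]) <<  8) |
	        static_cast<uint32_t>(decoderConfig[27]);
}
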
int main(int argc, char** argv)
{


	if (argc < 2) {
		fprintf(stderr, "Usage: %s <file>\n", argv[0]);
		exit(1);
	}

	//u_int32_t verbosity = MP4_DETAILS_ALL;
	char* fileName = argv[1];

	// open the mp4 file, and read meta-info
	MP4FileHandle mp4File = MP4Read(fileName  );

	uint8_t profileLevel = MP4GetVideoProfileLevel(mp4File);

	// get a handle on the first video track
	MP4TrackId trackId = MP4FindTrackId(mp4File, 0, "video");

	// gather the crucial track information 

	uint32_t timeScale = MP4GetTrackTimeScale(mp4File, trackId);

	// note all times and durations 
	// are in units of the track time scale

	MP4Duration trackDuration = MP4GetTrackDuration(mp4File, trackId);

	MP4SampleId numSamples = MP4GetTrackNumberOfSamples(mp4File, trackId);

	uint32_t maxSampleSize = MP4GetTrackMaxSampleSize(mp4File, trackId);

	uint8_t* pConfig;
	uint32_t configSize = 0;

	MP4GetTrackESConfiguration(mp4File, trackId, &pConfig, &configSize);

	// initialize decoder with Elementary Stream (ES) configuration

	// done with our copy of ES configuration
	free(pConfig);


	// now consecutively read and display the track samples

	uint8_t* pSample = (uint8_t*)malloc(maxSampleSize);
	uint32_t sampleSize;

	MP4Timestamp sampleTime;
	MP4Duration sampleDuration;
	MP4Duration sampleRenderingOffset;
	bool isSyncSample;

	for (MP4SampleId sampleId = 1; sampleId <= numSamples; sampleId++) {

		// give ReadSample our own buffer, and let it know how big it is
		sampleSize = maxSampleSize;

		// read next sample from video track
		MP4ReadSample(mp4File, trackId, sampleId, 
			&pSample, &sampleSize,
			&sampleTime, &sampleDuration, &sampleRenderingOffset, 
			&isSyncSample);

		// convert timestamp and duration from track time to milliseconds
		uint64_t myTime = MP4ConvertFromTrackTimestamp(mp4File, trackId, 
			sampleTime, MP4_MSECS_TIME_SCALE);

		uint64_t myDuration = MP4ConvertFromTrackDuration(mp4File, trackId,
			sampleDuration, MP4_MSECS_TIME_SCALE);

		// decode frame and display it
	}

	// close mp4 file
	MP4Close(mp4File);


	// Note to seek to time 'when' in the track
	// use MP4GetSampleIdFromTime(MP4FileHandle hFile, MP4TrackId trackId,
	//		MP4Timestamp when, bool wantSyncSample)
	// 'wantSyncSample' determines if a sync sample is desired or not
	// e.g.
	// MP4Timestamp when = 
	//	MP4ConvertToTrackTimestamp(mp4File, trackId, 30, MP4_SECS_TIME_SCALE);
	// MP4SampleId newSampleId = MP4GetSampleIdFromTime(mp4File, trackId, when, true);
	// MP4ReadSample(mp4File, trackId, newSampleId, ...);
	// 
	// Note that start time for sample may be later than 'when'

	exit(0);
}